2024-11-20 17:20:57,258 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-20 17:20:57,273 main DEBUG Took 0.012497 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-20 17:20:57,273 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-20 17:20:57,273 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-20 17:20:57,274 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-20 17:20:57,276 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 17:20:57,282 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-20 17:20:57,294 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 17:20:57,295 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 17:20:57,296 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 17:20:57,296 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 17:20:57,297 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 17:20:57,297 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 17:20:57,298 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 17:20:57,298 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 17:20:57,299 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 17:20:57,299 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 17:20:57,300 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 17:20:57,300 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 17:20:57,300 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 17:20:57,301 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-20 17:20:57,301 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 17:20:57,301 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 17:20:57,302 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 17:20:57,302 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 17:20:57,302 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 17:20:57,303 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 17:20:57,303 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 17:20:57,303 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 17:20:57,304 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 17:20:57,304 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 17:20:57,304 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 17:20:57,305 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-20 17:20:57,306 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 17:20:57,307 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-20 17:20:57,309 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-20 17:20:57,309 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-20 17:20:57,310 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-20 17:20:57,311 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-20 17:20:57,319 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-20 17:20:57,321 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-20 17:20:57,323 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-20 17:20:57,323 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-20 17:20:57,324 main DEBUG createAppenders(={Console}) 2024-11-20 17:20:57,324 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized 2024-11-20 17:20:57,325 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-20 17:20:57,325 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK. 2024-11-20 17:20:57,325 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-20 17:20:57,326 main DEBUG OutputStream closed 2024-11-20 17:20:57,326 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-20 17:20:57,326 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-20 17:20:57,326 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK 2024-11-20 17:20:57,400 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-20 17:20:57,402 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-20 17:20:57,403 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-20 17:20:57,404 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-20 17:20:57,404 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-20 17:20:57,404 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-20 17:20:57,405 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-20 17:20:57,405 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-20 17:20:57,405 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-20 17:20:57,406 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-20 17:20:57,406 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-20 17:20:57,406 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-20 17:20:57,407 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-20 17:20:57,407 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-20 17:20:57,407 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-20 17:20:57,407 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-20 17:20:57,408 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-20 17:20:57,408 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-20 17:20:57,411 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-20 17:20:57,411 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null 2024-11-20 17:20:57,412 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-20 17:20:57,413 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK. 2024-11-20T17:20:57,673 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0b2b9d67-2b66-a3f2-eea9-49157503e4e9 2024-11-20 17:20:57,675 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-20 17:20:57,676 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
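The lines above show Log4j2 assembling the test logging configuration from the bundled log4j2.properties: per-package logger levels, a PatternLayout of %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n, a console-type appender writing to SYSTEM_ERR, and an INFO root logger. As a reference point, here is a minimal sketch of an equivalent configuration built programmatically with Log4j2's ConfigurationBuilder API; it uses the stock Console appender rather than HBase's custom HBaseTestAppender, and the class name and logger choices are illustrative only.

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.core.LoggerContext;
    import org.apache.logging.log4j.core.appender.ConsoleAppender;
    import org.apache.logging.log4j.core.config.Configurator;
    import org.apache.logging.log4j.core.config.builder.api.AppenderComponentBuilder;
    import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilder;
    import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilderFactory;
    import org.apache.logging.log4j.core.config.builder.impl.BuiltConfiguration;

    public class TestLoggingSketch {
      public static void main(String[] args) {
        ConfigurationBuilder<BuiltConfiguration> b =
            ConfigurationBuilderFactory.newConfigurationBuilder();
        // Console appender on stderr with the same pattern the test harness prints above.
        AppenderComponentBuilder console = b.newAppender("Console", "Console")
            .addAttribute("target", ConsoleAppender.Target.SYSTEM_ERR)
            .add(b.newLayout("PatternLayout")
                .addAttribute("pattern", "%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n"));
        b.add(console);
        // A few of the per-package levels visible in the LoggerConfig$Builder lines above.
        b.add(b.newLogger("org.apache.hadoop", Level.WARN));
        b.add(b.newLogger("org.apache.hadoop.hbase", Level.DEBUG));
        b.add(b.newLogger("org.apache.zookeeper", Level.ERROR));
        b.add(b.newRootLogger(Level.INFO).add(b.newAppenderRef("Console")));
        LoggerContext ctx = Configurator.initialize(b.build());
        ctx.getLogger(TestLoggingSketch.class.getName()).info("logging configured");
      }
    }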
2024-11-20T17:20:57,685 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithBasicPolicy timeout: 13 mins 2024-11-20T17:20:57,704 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-20T17:20:57,707 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0b2b9d67-2b66-a3f2-eea9-49157503e4e9/cluster_0914e0c7-bed1-0a2a-84e6-4611fbce9121, deleteOnExit=true 2024-11-20T17:20:57,707 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-11-20T17:20:57,708 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0b2b9d67-2b66-a3f2-eea9-49157503e4e9/test.cache.data in system properties and HBase conf 2024-11-20T17:20:57,708 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0b2b9d67-2b66-a3f2-eea9-49157503e4e9/hadoop.tmp.dir in system properties and HBase conf 2024-11-20T17:20:57,709 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0b2b9d67-2b66-a3f2-eea9-49157503e4e9/hadoop.log.dir in system properties and HBase conf 2024-11-20T17:20:57,709 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0b2b9d67-2b66-a3f2-eea9-49157503e4e9/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-20T17:20:57,710 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0b2b9d67-2b66-a3f2-eea9-49157503e4e9/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-20T17:20:57,710 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-11-20T17:20:57,813 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-20T17:20:57,908 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-20T17:20:57,912 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0b2b9d67-2b66-a3f2-eea9-49157503e4e9/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-20T17:20:57,913 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0b2b9d67-2b66-a3f2-eea9-49157503e4e9/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-20T17:20:57,913 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0b2b9d67-2b66-a3f2-eea9-49157503e4e9/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-20T17:20:57,913 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0b2b9d67-2b66-a3f2-eea9-49157503e4e9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T17:20:57,914 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0b2b9d67-2b66-a3f2-eea9-49157503e4e9/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-20T17:20:57,914 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0b2b9d67-2b66-a3f2-eea9-49157503e4e9/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-20T17:20:57,915 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0b2b9d67-2b66-a3f2-eea9-49157503e4e9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T17:20:57,915 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0b2b9d67-2b66-a3f2-eea9-49157503e4e9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T17:20:57,915 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0b2b9d67-2b66-a3f2-eea9-49157503e4e9/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-20T17:20:57,916 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0b2b9d67-2b66-a3f2-eea9-49157503e4e9/nfs.dump.dir in system properties and HBase conf 2024-11-20T17:20:57,916 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0b2b9d67-2b66-a3f2-eea9-49157503e4e9/java.io.tmpdir in system properties and HBase conf 2024-11-20T17:20:57,917 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0b2b9d67-2b66-a3f2-eea9-49157503e4e9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T17:20:57,917 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0b2b9d67-2b66-a3f2-eea9-49157503e4e9/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-20T17:20:57,917 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0b2b9d67-2b66-a3f2-eea9-49157503e4e9/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-20T17:20:58,723 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-20T17:20:58,803 INFO [Time-limited test {}] log.Log(170): Logging initialized @2244ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-20T17:20:58,880 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T17:20:58,944 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T17:20:58,969 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T17:20:58,970 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T17:20:58,972 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T17:20:58,986 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T17:20:58,989 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0b2b9d67-2b66-a3f2-eea9-49157503e4e9/hadoop.log.dir/,AVAILABLE} 2024-11-20T17:20:58,990 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T17:20:59,227 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b03fcff{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0b2b9d67-2b66-a3f2-eea9-49157503e4e9/java.io.tmpdir/jetty-localhost-37617-hadoop-hdfs-3_4_1-tests_jar-_-any-1806803727339703646/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T17:20:59,235 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:37617} 2024-11-20T17:20:59,235 INFO [Time-limited test {}] server.Server(415): Started @2677ms 2024-11-20T17:20:59,641 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T17:20:59,648 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T17:20:59,649 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T17:20:59,649 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T17:20:59,649 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T17:20:59,650 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0b2b9d67-2b66-a3f2-eea9-49157503e4e9/hadoop.log.dir/,AVAILABLE} 2024-11-20T17:20:59,651 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T17:20:59,771 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1f79ec76{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0b2b9d67-2b66-a3f2-eea9-49157503e4e9/java.io.tmpdir/jetty-localhost-40997-hadoop-hdfs-3_4_1-tests_jar-_-any-2145703310908188004/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T17:20:59,772 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:40997} 2024-11-20T17:20:59,772 INFO [Time-limited test {}] server.Server(415): Started @3214ms 2024-11-20T17:20:59,829 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T17:21:00,302 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0b2b9d67-2b66-a3f2-eea9-49157503e4e9/cluster_0914e0c7-bed1-0a2a-84e6-4611fbce9121/dfs/data/data1/current/BP-301989904-172.17.0.2-1732123258485/current, will proceed with Du for space computation calculation, 2024-11-20T17:21:00,302 WARN [Thread-73 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0b2b9d67-2b66-a3f2-eea9-49157503e4e9/cluster_0914e0c7-bed1-0a2a-84e6-4611fbce9121/dfs/data/data2/current/BP-301989904-172.17.0.2-1732123258485/current, will proceed with Du for space computation calculation, 2024-11-20T17:21:00,342 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-20T17:21:00,398 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x585eaa1e5c6fe1dd with lease ID 0x8b9089acc0a9d31f: Processing first storage report for DS-ceb73bb7-f5ce-4e71-a1d0-be03caa67ebc from datanode DatanodeRegistration(127.0.0.1:40823, datanodeUuid=6fd58b4a-7db6-42c8-a040-22f97fcd7c4e, infoPort=44543, infoSecurePort=0, ipcPort=38817, storageInfo=lv=-57;cid=testClusterID;nsid=1214748596;c=1732123258485) 2024-11-20T17:21:00,399 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x585eaa1e5c6fe1dd with lease ID 0x8b9089acc0a9d31f: from storage DS-ceb73bb7-f5ce-4e71-a1d0-be03caa67ebc node DatanodeRegistration(127.0.0.1:40823, datanodeUuid=6fd58b4a-7db6-42c8-a040-22f97fcd7c4e, infoPort=44543, infoSecurePort=0, ipcPort=38817, storageInfo=lv=-57;cid=testClusterID;nsid=1214748596;c=1732123258485), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-20T17:21:00,400 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x585eaa1e5c6fe1dd with lease ID 0x8b9089acc0a9d31f: Processing first storage report for DS-d9a7478e-21c9-45a5-83d9-87e76754b778 from datanode DatanodeRegistration(127.0.0.1:40823, datanodeUuid=6fd58b4a-7db6-42c8-a040-22f97fcd7c4e, infoPort=44543, infoSecurePort=0, ipcPort=38817, storageInfo=lv=-57;cid=testClusterID;nsid=1214748596;c=1732123258485) 2024-11-20T17:21:00,400 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x585eaa1e5c6fe1dd with lease ID 0x8b9089acc0a9d31f: from storage DS-d9a7478e-21c9-45a5-83d9-87e76754b778 node DatanodeRegistration(127.0.0.1:40823, datanodeUuid=6fd58b4a-7db6-42c8-a040-22f97fcd7c4e, infoPort=44543, infoSecurePort=0, ipcPort=38817, storageInfo=lv=-57;cid=testClusterID;nsid=1214748596;c=1732123258485), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T17:21:00,495 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0b2b9d67-2b66-a3f2-eea9-49157503e4e9 
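The sequence above (HBaseTestingUtility setting test directories, a single-DataNode HDFS coming up behind Jetty, and the first block reports) is what the StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=1, numZkServers=1} printed earlier produces. A minimal sketch of driving that same startup from a test, assuming the branch-2 HBaseTestingUtility API; the table and column family names are illustrative, not taken from the log.

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.StartMiniClusterOption;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        // Mirrors the StartMiniClusterOption printed by HBaseTestingUtility(1126) above.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(1)
            .numZkServers(1)
            .build();
        util.startMiniCluster(option); // starts mini DFS, mini ZooKeeper and HBase
        try {
          // Illustrative table; the real test creates its own schema.
          Table table = util.createTable(TableName.valueOf("SketchTable"), Bytes.toBytes("A"));
          table.close();
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }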
2024-11-20T17:21:00,581 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0b2b9d67-2b66-a3f2-eea9-49157503e4e9/cluster_0914e0c7-bed1-0a2a-84e6-4611fbce9121/zookeeper_0, clientPort=55266, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0b2b9d67-2b66-a3f2-eea9-49157503e4e9/cluster_0914e0c7-bed1-0a2a-84e6-4611fbce9121/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0b2b9d67-2b66-a3f2-eea9-49157503e4e9/cluster_0914e0c7-bed1-0a2a-84e6-4611fbce9121/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-20T17:21:00,591 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=55266 2024-11-20T17:21:00,606 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T17:21:00,612 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T17:21:00,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741825_1001 (size=7) 2024-11-20T17:21:01,260 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0 with version=8 2024-11-20T17:21:01,260 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/hbase-staging 2024-11-20T17:21:01,390 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-20T17:21:01,663 INFO [Time-limited test {}] client.ConnectionUtils(129): master/d514dc944523:0 server-side Connection retries=45 2024-11-20T17:21:01,682 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T17:21:01,682 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T17:21:01,683 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T17:21:01,683 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T17:21:01,683 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T17:21:01,815 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating 
org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-20T17:21:01,876 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-20T17:21:01,886 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-20T17:21:01,890 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T17:21:01,918 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 20496 (auto-detected) 2024-11-20T17:21:01,919 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-20T17:21:01,939 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:38505 2024-11-20T17:21:01,948 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T17:21:01,950 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T17:21:01,963 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:38505 connecting to ZooKeeper ensemble=127.0.0.1:55266 2024-11-20T17:21:01,995 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:385050x0, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T17:21:01,997 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38505-0x10015f32f730000 connected 2024-11-20T17:21:02,025 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38505-0x10015f32f730000, quorum=127.0.0.1:55266, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T17:21:02,028 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38505-0x10015f32f730000, quorum=127.0.0.1:55266, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T17:21:02,030 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38505-0x10015f32f730000, quorum=127.0.0.1:55266, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T17:21:02,036 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38505 2024-11-20T17:21:02,036 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38505 2024-11-20T17:21:02,039 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38505 2024-11-20T17:21:02,040 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38505 2024-11-20T17:21:02,042 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38505 
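At this point the mini ZooKeeper ensemble is listening on 127.0.0.1:55266 and the master's NettyRpcServer is bound to 172.17.0.2:38505 with its handler pools started. A minimal sketch of how a client would reach this cluster through the printed quorum and client port; both values are regenerated on every run, so the numbers here are only illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClientConnectSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Quorum and client port as printed by MiniZooKeeperCluster above (per-run values).
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.setInt("hbase.zookeeper.property.clientPort", 55266);
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
        }
      }
    }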
2024-11-20T17:21:02,049 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0, hbase.cluster.distributed=false 2024-11-20T17:21:02,111 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/d514dc944523:0 server-side Connection retries=45 2024-11-20T17:21:02,112 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T17:21:02,112 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T17:21:02,112 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T17:21:02,112 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T17:21:02,112 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T17:21:02,115 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-20T17:21:02,117 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T17:21:02,118 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:40121 2024-11-20T17:21:02,119 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-20T17:21:02,125 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-20T17:21:02,126 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T17:21:02,129 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T17:21:02,132 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:40121 connecting to ZooKeeper ensemble=127.0.0.1:55266 2024-11-20T17:21:02,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:401210x0, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T17:21:02,137 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:401210x0, quorum=127.0.0.1:55266, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T17:21:02,137 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40121-0x10015f32f730001 connected 2024-11-20T17:21:02,139 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40121-0x10015f32f730001, quorum=127.0.0.1:55266, 
baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T17:21:02,140 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40121-0x10015f32f730001, quorum=127.0.0.1:55266, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T17:21:02,141 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40121 2024-11-20T17:21:02,141 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40121 2024-11-20T17:21:02,143 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40121 2024-11-20T17:21:02,146 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40121 2024-11-20T17:21:02,147 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40121 2024-11-20T17:21:02,149 INFO [master/d514dc944523:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/d514dc944523,38505,1732123261383 2024-11-20T17:21:02,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40121-0x10015f32f730001, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T17:21:02,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38505-0x10015f32f730000, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T17:21:02,159 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38505-0x10015f32f730000, quorum=127.0.0.1:55266, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/d514dc944523,38505,1732123261383 2024-11-20T17:21:02,167 DEBUG [M:0;d514dc944523:38505 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;d514dc944523:38505 2024-11-20T17:21:02,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40121-0x10015f32f730001, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-20T17:21:02,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38505-0x10015f32f730000, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-20T17:21:02,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40121-0x10015f32f730001, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T17:21:02,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38505-0x10015f32f730000, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T17:21:02,184 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38505-0x10015f32f730000, quorum=127.0.0.1:55266, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-20T17:21:02,184 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(111): master:38505-0x10015f32f730000, quorum=127.0.0.1:55266, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-20T17:21:02,185 INFO [master/d514dc944523:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/d514dc944523,38505,1732123261383 from backup master directory 2024-11-20T17:21:02,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40121-0x10015f32f730001, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T17:21:02,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38505-0x10015f32f730000, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/d514dc944523,38505,1732123261383 2024-11-20T17:21:02,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38505-0x10015f32f730000, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T17:21:02,189 WARN [master/d514dc944523:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T17:21:02,189 INFO [master/d514dc944523:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=d514dc944523,38505,1732123261383 2024-11-20T17:21:02,191 INFO [master/d514dc944523:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-20T17:21:02,193 INFO [master/d514dc944523:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-20T17:21:02,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741826_1002 (size=42) 2024-11-20T17:21:02,263 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/hbase.id with ID: 040206f9-fb7c-4aab-8bd5-015365fde8f8 2024-11-20T17:21:02,303 INFO [master/d514dc944523:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T17:21:02,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38505-0x10015f32f730000, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T17:21:02,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40121-0x10015f32f730001, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T17:21:02,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741827_1003 (size=196) 2024-11-20T17:21:02,362 INFO [master/d514dc944523:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME 
=> 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T17:21:02,364 INFO [master/d514dc944523:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-20T17:21:02,382 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:02,387 INFO [master/d514dc944523:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-20T17:21:02,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741828_1004 (size=1189) 2024-11-20T17:21:02,836 INFO [master/d514dc944523:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/MasterData/data/master/store 2024-11-20T17:21:02,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741829_1005 (size=34) 2024-11-20T17:21:03,257 INFO [master/d514dc944523:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
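The NoSuchMethodException in the stack trace above is expected: FanOutOneBlockAsyncDFSOutputSaslHelper probes DFSClient by reflection for an optional method, logs the miss at DEBUG, and picks an alternate helper, so the exception never propagates. A minimal sketch of that probe-and-fallback pattern; the method and class names are taken from the log, but the helper shown is illustrative and not the actual HBase implementation.

    import java.lang.reflect.Method;

    public class OptionalMethodProbe {
      // Returns the probed method if the running Hadoop exposes it, else null so the
      // caller can choose the alternate code path. On older/newer Hadoop versions the
      // NoSuchMethodException is expected and only worth a DEBUG log, as seen above.
      static Method findDecryptMethod() {
        try {
          Class<?> dfsClient = Class.forName("org.apache.hadoop.hdfs.DFSClient");
          Class<?> feInfo = Class.forName("org.apache.hadoop.fs.FileEncryptionInfo");
          return dfsClient.getDeclaredMethod("decryptEncryptedDataEncryptionKey", feInfo);
        } catch (ClassNotFoundException | NoSuchMethodException e) {
          return null; // fall back, optionally logging e at DEBUG
        }
      }
    }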
2024-11-20T17:21:03,257 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:21:03,258 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T17:21:03,259 INFO [master/d514dc944523:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T17:21:03,259 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T17:21:03,259 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T17:21:03,259 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T17:21:03,259 INFO [master/d514dc944523:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T17:21:03,259 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-20T17:21:03,262 WARN [master/d514dc944523:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/MasterData/data/master/store/.initializing 2024-11-20T17:21:03,262 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/MasterData/WALs/d514dc944523,38505,1732123261383 2024-11-20T17:21:03,268 INFO [master/d514dc944523:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-20T17:21:03,279 INFO [master/d514dc944523:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d514dc944523%2C38505%2C1732123261383, suffix=, logDir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/MasterData/WALs/d514dc944523,38505,1732123261383, archiveDir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/MasterData/oldWALs, maxLogs=10 2024-11-20T17:21:03,302 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/MasterData/WALs/d514dc944523,38505,1732123261383/d514dc944523%2C38505%2C1732123261383.1732123263284, exclude list is [], retry=0 2024-11-20T17:21:03,319 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40823,DS-ceb73bb7-f5ce-4e71-a1d0-be03caa67ebc,DISK] 2024-11-20T17:21:03,322 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
2024-11-20T17:21:03,357 INFO [master/d514dc944523:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/MasterData/WALs/d514dc944523,38505,1732123261383/d514dc944523%2C38505%2C1732123261383.1732123263284 2024-11-20T17:21:03,358 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44543:44543)] 2024-11-20T17:21:03,359 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-20T17:21:03,359 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:21:03,362 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T17:21:03,363 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T17:21:03,401 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T17:21:03,425 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-20T17:21:03,429 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:03,431 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T17:21:03,432 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T17:21:03,435 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-20T17:21:03,435 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:03,436 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:21:03,437 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T17:21:03,439 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-20T17:21:03,440 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:03,441 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:21:03,441 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T17:21:03,443 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-20T17:21:03,444 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:03,445 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:21:03,449 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T17:21:03,450 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T17:21:03,458 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-20T17:21:03,463 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T17:21:03,467 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T17:21:03,469 INFO [master/d514dc944523:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59215791, jitterRate=-0.1176159530878067}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-20T17:21:03,474 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-20T17:21:03,475 INFO [master/d514dc944523:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-20T17:21:03,505 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2835dcfb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:21:03,541 INFO [master/d514dc944523:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
2024-11-20T17:21:03,554 INFO [master/d514dc944523:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-20T17:21:03,554 INFO [master/d514dc944523:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-20T17:21:03,557 INFO [master/d514dc944523:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-20T17:21:03,558 INFO [master/d514dc944523:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-11-20T17:21:03,564 INFO [master/d514dc944523:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 5 msec 2024-11-20T17:21:03,564 INFO [master/d514dc944523:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-20T17:21:03,598 INFO [master/d514dc944523:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-20T17:21:03,610 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38505-0x10015f32f730000, quorum=127.0.0.1:55266, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-20T17:21:03,613 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-11-20T17:21:03,615 INFO [master/d514dc944523:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-20T17:21:03,617 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38505-0x10015f32f730000, quorum=127.0.0.1:55266, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-20T17:21:03,618 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-11-20T17:21:03,620 INFO [master/d514dc944523:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-20T17:21:03,624 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38505-0x10015f32f730000, quorum=127.0.0.1:55266, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-20T17:21:03,626 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-11-20T17:21:03,627 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38505-0x10015f32f730000, quorum=127.0.0.1:55266, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-20T17:21:03,629 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-11-20T17:21:03,638 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38505-0x10015f32f730000, quorum=127.0.0.1:55266, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-20T17:21:03,640 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-20T17:21:03,644 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38505-0x10015f32f730000, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T17:21:03,644 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40121-0x10015f32f730001, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T17:21:03,644 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38505-0x10015f32f730000, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T17:21:03,644 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40121-0x10015f32f730001, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T17:21:03,645 INFO [master/d514dc944523:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=d514dc944523,38505,1732123261383, sessionid=0x10015f32f730000, setting cluster-up flag (Was=false) 2024-11-20T17:21:03,657 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40121-0x10015f32f730001, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T17:21:03,657 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38505-0x10015f32f730000, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T17:21:03,662 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-20T17:21:03,664 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d514dc944523,38505,1732123261383 2024-11-20T17:21:03,669 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40121-0x10015f32f730001, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T17:21:03,669 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38505-0x10015f32f730000, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T17:21:03,675 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-20T17:21:03,676 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d514dc944523,38505,1732123261383 2024-11-20T17:21:03,755 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure 
table=hbase:meta 2024-11-20T17:21:03,761 INFO [master/d514dc944523:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-11-20T17:21:03,763 DEBUG [RS:0;d514dc944523:40121 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;d514dc944523:40121 2024-11-20T17:21:03,763 INFO [master/d514dc944523:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-20T17:21:03,765 INFO [RS:0;d514dc944523:40121 {}] regionserver.HRegionServer(1008): ClusterId : 040206f9-fb7c-4aab-8bd5-015365fde8f8 2024-11-20T17:21:03,767 DEBUG [RS:0;d514dc944523:40121 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-20T17:21:03,769 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: d514dc944523,38505,1732123261383 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-20T17:21:03,772 DEBUG [RS:0;d514dc944523:40121 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-20T17:21:03,773 DEBUG [RS:0;d514dc944523:40121 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-20T17:21:03,773 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/d514dc944523:0, corePoolSize=5, maxPoolSize=5 2024-11-20T17:21:03,773 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/d514dc944523:0, corePoolSize=5, maxPoolSize=5 2024-11-20T17:21:03,773 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/d514dc944523:0, corePoolSize=5, maxPoolSize=5 2024-11-20T17:21:03,774 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/d514dc944523:0, corePoolSize=5, maxPoolSize=5 2024-11-20T17:21:03,774 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/d514dc944523:0, corePoolSize=10, maxPoolSize=10 2024-11-20T17:21:03,774 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/d514dc944523:0, corePoolSize=1, maxPoolSize=1 2024-11-20T17:21:03,774 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/d514dc944523:0, corePoolSize=2, maxPoolSize=2 2024-11-20T17:21:03,774 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service 
name=MASTER_TABLE_OPERATIONS-master/d514dc944523:0, corePoolSize=1, maxPoolSize=1 2024-11-20T17:21:03,775 INFO [master/d514dc944523:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732123293775 2024-11-20T17:21:03,776 DEBUG [RS:0;d514dc944523:40121 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-20T17:21:03,776 DEBUG [RS:0;d514dc944523:40121 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5cbae0dc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:21:03,777 INFO [master/d514dc944523:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-20T17:21:03,778 INFO [master/d514dc944523:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-20T17:21:03,778 DEBUG [RS:0;d514dc944523:40121 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a2280bc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d514dc944523/172.17.0.2:0 2024-11-20T17:21:03,779 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-11-20T17:21:03,780 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-11-20T17:21:03,782 INFO [RS:0;d514dc944523:40121 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-11-20T17:21:03,782 INFO [master/d514dc944523:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-20T17:21:03,782 INFO [RS:0;d514dc944523:40121 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-11-20T17:21:03,782 DEBUG [RS:0;d514dc944523:40121 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-11-20T17:21:03,782 INFO [master/d514dc944523:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-20T17:21:03,783 INFO [master/d514dc944523:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-20T17:21:03,783 INFO [master/d514dc944523:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-20T17:21:03,783 INFO [master/d514dc944523:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-20T17:21:03,784 INFO [RS:0;d514dc944523:40121 {}] regionserver.HRegionServer(3073): reportForDuty to master=d514dc944523,38505,1732123261383 with isa=d514dc944523/172.17.0.2:40121, startcode=1732123262111 2024-11-20T17:21:03,785 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:03,785 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T17:21:03,786 INFO [master/d514dc944523:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-20T17:21:03,787 INFO [master/d514dc944523:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-20T17:21:03,787 INFO [master/d514dc944523:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-20T17:21:03,789 INFO [master/d514dc944523:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-20T17:21:03,790 INFO [master/d514dc944523:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-20T17:21:03,791 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/d514dc944523:0:becomeActiveMaster-HFileCleaner.large.0-1732123263791,5,FailOnTimeoutGroup] 2024-11-20T17:21:03,796 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/d514dc944523:0:becomeActiveMaster-HFileCleaner.small.0-1732123263792,5,FailOnTimeoutGroup] 2024-11-20T17:21:03,796 INFO [master/d514dc944523:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T17:21:03,796 INFO [master/d514dc944523:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 
2024-11-20T17:21:03,797 DEBUG [RS:0;d514dc944523:40121 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T17:21:03,798 INFO [master/d514dc944523:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-20T17:21:03,798 INFO [master/d514dc944523:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-20T17:21:03,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741831_1007 (size=1039) 2024-11-20T17:21:03,835 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51267, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T17:21:03,843 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38505 {}] master.ServerManager(332): Checking decommissioned status of RegionServer d514dc944523,40121,1732123262111 2024-11-20T17:21:03,846 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38505 {}] master.ServerManager(486): Registering regionserver=d514dc944523,40121,1732123262111 2024-11-20T17:21:03,860 DEBUG [RS:0;d514dc944523:40121 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0 2024-11-20T17:21:03,861 DEBUG [RS:0;d514dc944523:40121 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:41637 2024-11-20T17:21:03,861 DEBUG [RS:0;d514dc944523:40121 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-11-20T17:21:03,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38505-0x10015f32f730000, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T17:21:03,866 DEBUG [RS:0;d514dc944523:40121 {}] zookeeper.ZKUtil(111): regionserver:40121-0x10015f32f730001, quorum=127.0.0.1:55266, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/d514dc944523,40121,1732123262111 2024-11-20T17:21:03,866 WARN [RS:0;d514dc944523:40121 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-20T17:21:03,866 INFO [RS:0;d514dc944523:40121 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-20T17:21:03,866 DEBUG [RS:0;d514dc944523:40121 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/WALs/d514dc944523,40121,1732123262111 2024-11-20T17:21:03,868 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [d514dc944523,40121,1732123262111] 2024-11-20T17:21:03,879 DEBUG [RS:0;d514dc944523:40121 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-11-20T17:21:03,890 INFO [RS:0;d514dc944523:40121 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-20T17:21:03,904 INFO [RS:0;d514dc944523:40121 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-20T17:21:03,906 INFO [RS:0;d514dc944523:40121 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-20T17:21:03,906 INFO [RS:0;d514dc944523:40121 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T17:21:03,907 INFO [RS:0;d514dc944523:40121 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-11-20T17:21:03,914 INFO [RS:0;d514dc944523:40121 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-20T17:21:03,914 DEBUG [RS:0;d514dc944523:40121 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/d514dc944523:0, corePoolSize=1, maxPoolSize=1 2024-11-20T17:21:03,915 DEBUG [RS:0;d514dc944523:40121 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/d514dc944523:0, corePoolSize=1, maxPoolSize=1 2024-11-20T17:21:03,915 DEBUG [RS:0;d514dc944523:40121 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/d514dc944523:0, corePoolSize=1, maxPoolSize=1 2024-11-20T17:21:03,915 DEBUG [RS:0;d514dc944523:40121 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/d514dc944523:0, corePoolSize=1, maxPoolSize=1 2024-11-20T17:21:03,915 DEBUG [RS:0;d514dc944523:40121 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/d514dc944523:0, corePoolSize=1, maxPoolSize=1 2024-11-20T17:21:03,915 DEBUG [RS:0;d514dc944523:40121 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/d514dc944523:0, corePoolSize=2, maxPoolSize=2 2024-11-20T17:21:03,915 DEBUG [RS:0;d514dc944523:40121 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0, corePoolSize=1, maxPoolSize=1 2024-11-20T17:21:03,916 DEBUG [RS:0;d514dc944523:40121 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/d514dc944523:0, corePoolSize=1, maxPoolSize=1 2024-11-20T17:21:03,916 DEBUG [RS:0;d514dc944523:40121 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/d514dc944523:0, corePoolSize=1, maxPoolSize=1 2024-11-20T17:21:03,916 DEBUG [RS:0;d514dc944523:40121 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/d514dc944523:0, corePoolSize=1, maxPoolSize=1 2024-11-20T17:21:03,916 DEBUG [RS:0;d514dc944523:40121 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/d514dc944523:0, corePoolSize=1, maxPoolSize=1 2024-11-20T17:21:03,916 DEBUG [RS:0;d514dc944523:40121 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/d514dc944523:0, corePoolSize=3, maxPoolSize=3 2024-11-20T17:21:03,916 DEBUG [RS:0;d514dc944523:40121 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0, corePoolSize=3, maxPoolSize=3 2024-11-20T17:21:03,918 INFO [RS:0;d514dc944523:40121 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T17:21:03,918 INFO [RS:0;d514dc944523:40121 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T17:21:03,919 INFO [RS:0;d514dc944523:40121 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-20T17:21:03,919 INFO [RS:0;d514dc944523:40121 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-20T17:21:03,919 INFO [RS:0;d514dc944523:40121 {}] hbase.ChoreService(168): Chore ScheduledChore name=d514dc944523,40121,1732123262111-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-11-20T17:21:03,939 INFO [RS:0;d514dc944523:40121 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-20T17:21:03,941 INFO [RS:0;d514dc944523:40121 {}] hbase.ChoreService(168): Chore ScheduledChore name=d514dc944523,40121,1732123262111-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T17:21:03,966 INFO [RS:0;d514dc944523:40121 {}] regionserver.Replication(204): d514dc944523,40121,1732123262111 started 2024-11-20T17:21:03,967 INFO [RS:0;d514dc944523:40121 {}] regionserver.HRegionServer(1767): Serving as d514dc944523,40121,1732123262111, RpcServer on d514dc944523/172.17.0.2:40121, sessionid=0x10015f32f730001 2024-11-20T17:21:03,967 DEBUG [RS:0;d514dc944523:40121 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-20T17:21:03,968 DEBUG [RS:0;d514dc944523:40121 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager d514dc944523,40121,1732123262111 2024-11-20T17:21:03,968 DEBUG [RS:0;d514dc944523:40121 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd514dc944523,40121,1732123262111' 2024-11-20T17:21:03,968 DEBUG [RS:0;d514dc944523:40121 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-20T17:21:03,969 DEBUG [RS:0;d514dc944523:40121 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-20T17:21:03,970 DEBUG [RS:0;d514dc944523:40121 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-20T17:21:03,970 DEBUG [RS:0;d514dc944523:40121 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-20T17:21:03,970 DEBUG [RS:0;d514dc944523:40121 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager d514dc944523,40121,1732123262111 2024-11-20T17:21:03,970 DEBUG [RS:0;d514dc944523:40121 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd514dc944523,40121,1732123262111' 2024-11-20T17:21:03,970 DEBUG [RS:0;d514dc944523:40121 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-20T17:21:03,970 DEBUG [RS:0;d514dc944523:40121 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-20T17:21:03,971 DEBUG [RS:0;d514dc944523:40121 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-20T17:21:03,971 INFO [RS:0;d514dc944523:40121 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-20T17:21:03,971 INFO [RS:0;d514dc944523:40121 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-20T17:21:04,077 INFO [RS:0;d514dc944523:40121 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-20T17:21:04,082 INFO [RS:0;d514dc944523:40121 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d514dc944523%2C40121%2C1732123262111, suffix=, logDir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/WALs/d514dc944523,40121,1732123262111, archiveDir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/oldWALs, maxLogs=32 2024-11-20T17:21:04,102 DEBUG [RS:0;d514dc944523:40121 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/WALs/d514dc944523,40121,1732123262111/d514dc944523%2C40121%2C1732123262111.1732123264084, exclude list is [], retry=0 2024-11-20T17:21:04,109 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40823,DS-ceb73bb7-f5ce-4e71-a1d0-be03caa67ebc,DISK] 2024-11-20T17:21:04,114 INFO [RS:0;d514dc944523:40121 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/WALs/d514dc944523,40121,1732123262111/d514dc944523%2C40121%2C1732123262111.1732123264084 2024-11-20T17:21:04,114 DEBUG [RS:0;d514dc944523:40121 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44543:44543)] 2024-11-20T17:21:04,202 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-11-20T17:21:04,202 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0 2024-11-20T17:21:04,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741833_1009 (size=32) 2024-11-20T17:21:04,614 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): 
Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:21:04,617 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T17:21:04,619 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T17:21:04,619 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:04,620 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T17:21:04,621 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T17:21:04,623 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T17:21:04,623 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:04,624 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T17:21:04,624 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T17:21:04,626 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T17:21:04,626 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:04,627 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T17:21:04,629 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/hbase/meta/1588230740 2024-11-20T17:21:04,629 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/hbase/meta/1588230740 2024-11-20T17:21:04,633 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-20T17:21:04,636 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-20T17:21:04,640 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T17:21:04,641 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58901449, jitterRate=-0.12230001389980316}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T17:21:04,643 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-20T17:21:04,643 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-20T17:21:04,643 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-20T17:21:04,643 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-20T17:21:04,643 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T17:21:04,643 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T17:21:04,645 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-20T17:21:04,645 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-20T17:21:04,647 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-11-20T17:21:04,647 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-11-20T17:21:04,653 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-20T17:21:04,662 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T17:21:04,664 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-20T17:21:04,816 DEBUG [d514dc944523:38505 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-20T17:21:04,821 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=d514dc944523,40121,1732123262111 2024-11-20T17:21:04,826 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d514dc944523,40121,1732123262111, state=OPENING 2024-11-20T17:21:04,831 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-20T17:21:04,834 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40121-0x10015f32f730001, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T17:21:04,834 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38505-0x10015f32f730000, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T17:21:04,834 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T17:21:04,835 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T17:21:04,836 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=d514dc944523,40121,1732123262111}] 2024-11-20T17:21:05,010 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:05,012 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-20T17:21:05,016 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47474, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-20T17:21:05,027 INFO [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-11-20T17:21:05,027 INFO [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-20T17:21:05,028 INFO [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-20T17:21:05,031 INFO [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d514dc944523%2C40121%2C1732123262111.meta, suffix=.meta, logDir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/WALs/d514dc944523,40121,1732123262111, archiveDir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/oldWALs, maxLogs=32 2024-11-20T17:21:05,048 DEBUG [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/WALs/d514dc944523,40121,1732123262111/d514dc944523%2C40121%2C1732123262111.meta.1732123265033.meta, exclude list is [], retry=0 2024-11-20T17:21:05,052 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40823,DS-ceb73bb7-f5ce-4e71-a1d0-be03caa67ebc,DISK] 2024-11-20T17:21:05,056 INFO [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/WALs/d514dc944523,40121,1732123262111/d514dc944523%2C40121%2C1732123262111.meta.1732123265033.meta 2024-11-20T17:21:05,056 DEBUG [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with 
pipeline: [(127.0.0.1/127.0.0.1:44543:44543)] 2024-11-20T17:21:05,057 DEBUG [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-20T17:21:05,058 DEBUG [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-20T17:21:05,117 DEBUG [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-20T17:21:05,122 INFO [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-20T17:21:05,126 DEBUG [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-20T17:21:05,126 DEBUG [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:21:05,127 DEBUG [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-11-20T17:21:05,127 DEBUG [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-11-20T17:21:05,130 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T17:21:05,132 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T17:21:05,132 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:05,133 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T17:21:05,133 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T17:21:05,135 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T17:21:05,135 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:05,135 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T17:21:05,136 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T17:21:05,137 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T17:21:05,137 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:05,138 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T17:21:05,139 DEBUG [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/hbase/meta/1588230740 2024-11-20T17:21:05,142 DEBUG [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/hbase/meta/1588230740 2024-11-20T17:21:05,144 DEBUG [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T17:21:05,147 DEBUG [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-20T17:21:05,148 INFO [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62286322, jitterRate=-0.07186147570610046}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T17:21:05,150 DEBUG [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-20T17:21:05,157 INFO [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732123265005 2024-11-20T17:21:05,168 DEBUG [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-20T17:21:05,169 INFO [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-11-20T17:21:05,170 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=d514dc944523,40121,1732123262111 2024-11-20T17:21:05,172 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d514dc944523,40121,1732123262111, state=OPEN 2024-11-20T17:21:05,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40121-0x10015f32f730001, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T17:21:05,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38505-0x10015f32f730000, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T17:21:05,178 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T17:21:05,178 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T17:21:05,182 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-11-20T17:21:05,182 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=d514dc944523,40121,1732123262111 in 342 msec 2024-11-20T17:21:05,188 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-11-20T17:21:05,188 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; 
TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 530 msec 2024-11-20T17:21:05,193 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.4820 sec 2024-11-20T17:21:05,193 INFO [master/d514dc944523:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732123265193, completionTime=-1 2024-11-20T17:21:05,194 INFO [master/d514dc944523:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-20T17:21:05,194 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-11-20T17:21:05,232 DEBUG [hconnection-0x5602a74-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:21:05,235 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47482, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:21:05,245 INFO [master/d514dc944523:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-11-20T17:21:05,245 INFO [master/d514dc944523:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732123325245 2024-11-20T17:21:05,245 INFO [master/d514dc944523:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732123385245 2024-11-20T17:21:05,245 INFO [master/d514dc944523:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 51 msec 2024-11-20T17:21:05,267 INFO [master/d514dc944523:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d514dc944523,38505,1732123261383-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T17:21:05,268 INFO [master/d514dc944523:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d514dc944523,38505,1732123261383-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T17:21:05,268 INFO [master/d514dc944523:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d514dc944523,38505,1732123261383-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T17:21:05,270 INFO [master/d514dc944523:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-d514dc944523:38505, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T17:21:05,270 INFO [master/d514dc944523:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-20T17:21:05,275 DEBUG [master/d514dc944523:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-11-20T17:21:05,278 INFO [master/d514dc944523:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
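The block above walks hbase:meta through open and assignment: the regionserver opens region 1588230740 with its info/rep_barrier/table stores, the master records regionState=OPEN, and the location is published to ZooKeeper under /hbase/meta-region-server. For orientation only, here is a minimal client-side sketch (not part of the test) that resolves the same location through the public API; it assumes an hbase-site.xml pointing at this minicluster is on the classpath:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class MetaLocationCheck {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();  // picks up hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          // Ask the cluster which server currently hosts hbase:meta (region 1588230740 in this log).
          HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
          System.out.println("hbase:meta is on " + loc.getServerName());
        }
      }
    }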
2024-11-20T17:21:05,280 INFO [master/d514dc944523:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T17:21:05,287 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-11-20T17:21:05,290 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T17:21:05,291 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:05,293 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T17:21:05,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741835_1011 (size=358) 2024-11-20T17:21:05,707 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 5ae1ceb1863550e6bded974b57fd057c, NAME => 'hbase:namespace,,1732123265279.5ae1ceb1863550e6bded974b57fd057c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0 2024-11-20T17:21:05,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741836_1012 (size=42) 2024-11-20T17:21:06,117 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732123265279.5ae1ceb1863550e6bded974b57fd057c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:21:06,117 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 5ae1ceb1863550e6bded974b57fd057c, disabling compactions & flushes 2024-11-20T17:21:06,118 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732123265279.5ae1ceb1863550e6bded974b57fd057c. 2024-11-20T17:21:06,118 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732123265279.5ae1ceb1863550e6bded974b57fd057c. 2024-11-20T17:21:06,118 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732123265279.5ae1ceb1863550e6bded974b57fd057c. 
after waiting 0 ms 2024-11-20T17:21:06,118 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732123265279.5ae1ceb1863550e6bded974b57fd057c. 2024-11-20T17:21:06,118 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1732123265279.5ae1ceb1863550e6bded974b57fd057c. 2024-11-20T17:21:06,118 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 5ae1ceb1863550e6bded974b57fd057c: 2024-11-20T17:21:06,120 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T17:21:06,127 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1732123265279.5ae1ceb1863550e6bded974b57fd057c.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1732123266121"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732123266121"}]},"ts":"1732123266121"} 2024-11-20T17:21:06,150 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T17:21:06,153 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T17:21:06,156 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123266153"}]},"ts":"1732123266153"} 2024-11-20T17:21:06,160 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-11-20T17:21:06,166 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=5ae1ceb1863550e6bded974b57fd057c, ASSIGN}] 2024-11-20T17:21:06,169 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=5ae1ceb1863550e6bded974b57fd057c, ASSIGN 2024-11-20T17:21:06,170 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=5ae1ceb1863550e6bded974b57fd057c, ASSIGN; state=OFFLINE, location=d514dc944523,40121,1732123262111; forceNewPlan=false, retain=false 2024-11-20T17:21:06,321 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=5ae1ceb1863550e6bded974b57fd057c, regionState=OPENING, regionLocation=d514dc944523,40121,1732123262111 2024-11-20T17:21:06,325 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 5ae1ceb1863550e6bded974b57fd057c, server=d514dc944523,40121,1732123262111}] 2024-11-20T17:21:06,479 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:06,485 INFO [RS_OPEN_PRIORITY_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1732123265279.5ae1ceb1863550e6bded974b57fd057c. 2024-11-20T17:21:06,485 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 5ae1ceb1863550e6bded974b57fd057c, NAME => 'hbase:namespace,,1732123265279.5ae1ceb1863550e6bded974b57fd057c.', STARTKEY => '', ENDKEY => ''} 2024-11-20T17:21:06,486 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 5ae1ceb1863550e6bded974b57fd057c 2024-11-20T17:21:06,486 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732123265279.5ae1ceb1863550e6bded974b57fd057c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:21:06,486 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 5ae1ceb1863550e6bded974b57fd057c 2024-11-20T17:21:06,486 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 5ae1ceb1863550e6bded974b57fd057c 2024-11-20T17:21:06,488 INFO [StoreOpener-5ae1ceb1863550e6bded974b57fd057c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 5ae1ceb1863550e6bded974b57fd057c 2024-11-20T17:21:06,491 INFO [StoreOpener-5ae1ceb1863550e6bded974b57fd057c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5ae1ceb1863550e6bded974b57fd057c columnFamilyName info 2024-11-20T17:21:06,491 DEBUG [StoreOpener-5ae1ceb1863550e6bded974b57fd057c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:06,492 INFO [StoreOpener-5ae1ceb1863550e6bded974b57fd057c-1 {}] regionserver.HStore(327): Store=5ae1ceb1863550e6bded974b57fd057c/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:21:06,493 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/hbase/namespace/5ae1ceb1863550e6bded974b57fd057c 2024-11-20T17:21:06,494 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/hbase/namespace/5ae1ceb1863550e6bded974b57fd057c 2024-11-20T17:21:06,498 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 5ae1ceb1863550e6bded974b57fd057c 2024-11-20T17:21:06,501 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/hbase/namespace/5ae1ceb1863550e6bded974b57fd057c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T17:21:06,502 INFO [RS_OPEN_PRIORITY_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 5ae1ceb1863550e6bded974b57fd057c; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60961102, jitterRate=-0.09160879254341125}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-20T17:21:06,504 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 5ae1ceb1863550e6bded974b57fd057c: 2024-11-20T17:21:06,506 INFO [RS_OPEN_PRIORITY_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1732123265279.5ae1ceb1863550e6bded974b57fd057c., pid=6, masterSystemTime=1732123266479 2024-11-20T17:21:06,509 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1732123265279.5ae1ceb1863550e6bded974b57fd057c. 2024-11-20T17:21:06,509 INFO [RS_OPEN_PRIORITY_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1732123265279.5ae1ceb1863550e6bded974b57fd057c. 
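With the hbase:namespace region online, the master can persist namespace definitions; the CreateNamespaceProcedure entries that follow create the built-in 'default' and 'hbase' namespaces. A user namespace goes through the same procedure when requested over the Admin API. A minimal sketch, with the namespace name 'demo_ns' chosen purely for illustration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class NamespaceExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Creating a namespace runs a CreateNamespaceProcedure on the master,
          // just like the 'default' and 'hbase' namespaces created in the entries below.
          admin.createNamespace(NamespaceDescriptor.create("demo_ns").build());
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println(ns.getName());
          }
        }
      }
    }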
2024-11-20T17:21:06,510 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=5ae1ceb1863550e6bded974b57fd057c, regionState=OPEN, openSeqNum=2, regionLocation=d514dc944523,40121,1732123262111 2024-11-20T17:21:06,517 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-11-20T17:21:06,517 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 5ae1ceb1863550e6bded974b57fd057c, server=d514dc944523,40121,1732123262111 in 188 msec 2024-11-20T17:21:06,520 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-11-20T17:21:06,520 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=5ae1ceb1863550e6bded974b57fd057c, ASSIGN in 351 msec 2024-11-20T17:21:06,521 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T17:21:06,522 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123266522"}]},"ts":"1732123266522"} 2024-11-20T17:21:06,524 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-11-20T17:21:06,528 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T17:21:06,531 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 1.2470 sec 2024-11-20T17:21:06,591 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:38505-0x10015f32f730000, quorum=127.0.0.1:55266, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-11-20T17:21:06,594 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38505-0x10015f32f730000, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-11-20T17:21:06,594 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40121-0x10015f32f730001, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T17:21:06,594 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38505-0x10015f32f730000, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T17:21:06,624 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-11-20T17:21:06,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38505-0x10015f32f730000, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-20T17:21:06,644 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 24 msec 2024-11-20T17:21:06,648 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-11-20T17:21:06,659 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38505-0x10015f32f730000, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-20T17:21:06,663 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 14 msec 2024-11-20T17:21:06,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38505-0x10015f32f730000, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-11-20T17:21:06,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38505-0x10015f32f730000, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-11-20T17:21:06,677 INFO [master/d514dc944523:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 4.488sec 2024-11-20T17:21:06,678 INFO [master/d514dc944523:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-20T17:21:06,680 INFO [master/d514dc944523:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-20T17:21:06,680 INFO [master/d514dc944523:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-20T17:21:06,681 INFO [master/d514dc944523:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-20T17:21:06,681 INFO [master/d514dc944523:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-20T17:21:06,682 INFO [master/d514dc944523:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d514dc944523,38505,1732123261383-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T17:21:06,682 INFO [master/d514dc944523:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d514dc944523,38505,1732123261383-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-20T17:21:06,689 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-11-20T17:21:06,689 INFO [master/d514dc944523:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-20T17:21:06,690 INFO [master/d514dc944523:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d514dc944523,38505,1732123261383-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
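Master initialization is complete at this point, so the test can open client connections. The connections that follow still go through ZKConnectionRegistry, which is why the client logs a deprecation WARN pointing at the RPC-based registry. A hedged sketch of opting into RpcConnectionRegistry instead; the property names are taken from the HBase book section referenced by that WARN and should be treated as assumptions here, as is the master endpoint used for bootstrap:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RegistryExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Assumption: property names per the book section the WARN links to; the RPC-based
        // registry bootstraps from cluster endpoints instead of reading ZooKeeper directly.
        conf.set("hbase.client.registry.impl",
            "org.apache.hadoop.hbase.client.RpcConnectionRegistry");
        conf.set("hbase.client.bootstrap.servers", "d514dc944523:38505");  // illustrative endpoint
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          System.out.println("connected: " + !conn.isClosed());
        }
      }
    }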
2024-11-20T17:21:06,768 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5e83c466 to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@39dee83f 2024-11-20T17:21:06,769 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-11-20T17:21:06,777 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67b8b597, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:21:06,781 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-20T17:21:06,781 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-20T17:21:06,790 DEBUG [hconnection-0x4c09ef46-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:21:06,798 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47490, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:21:06,807 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=d514dc944523,38505,1732123261383 2024-11-20T17:21:06,824 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMixedAtomicity Thread=219, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=139, ProcessCount=11, AvailableMemoryMB=6890 2024-11-20T17:21:06,835 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T17:21:06,837 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50534, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T17:21:06,866 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
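The TableDescriptorChecker WARN above fires because MEMSTORE_FLUSHSIZE is set to 131072 bytes (128 KB), presumably to force frequent flushes while the ACID checks run. A sketch of the descriptor setting that would trigger exactly this warning; whether the test sets it this way is an assumption:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class SmallFlushSize {
      public static void main(String[] args) {
        // A 128 KB memstore flush size trips TableDescriptorChecker's sanity check,
        // which is what the WARN above reports for this test table.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setMemStoreFlushSize(128 * 1024L)
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("A"))
            .build();
        System.out.println(td.getMemStoreFlushSize());
      }
    }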
2024-11-20T17:21:06,870 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T17:21:06,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T17:21:06,875 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T17:21:06,876 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:06,878 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T17:21:06,878 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-11-20T17:21:06,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T17:21:06,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741837_1013 (size=960) 2024-11-20T17:21:06,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T17:21:07,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T17:21:07,299 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS 
=> 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0 2024-11-20T17:21:07,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741838_1014 (size=53) 2024-11-20T17:21:07,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T17:21:07,710 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:21:07,710 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing d436a1ae301ec26cf78d29bd05a18bd2, disabling compactions & flushes 2024-11-20T17:21:07,710 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:07,710 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:07,710 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. after waiting 0 ms 2024-11-20T17:21:07,710 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:07,710 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 
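The descriptor in the create request above spells out the whole table: three column families A, B and C with VERSIONS => '1', plus the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'BASIC' that switches the stores to basic in-memory compaction. A client-side sketch that would produce an equivalent request is below; whether the test constructs its descriptor exactly like this is an assumption:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAcidTable {
      public static void main(String[] args) throws Exception {
        TableDescriptorBuilder builder = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            // Table-level attribute seen in the descriptor above: BASIC in-memory compaction.
            .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
        for (String family : new String[] {"A", "B", "C"}) {
          builder.setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)  // VERSIONS => '1' in the descriptor above
              .build());
        }
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.createTable(builder.build());  // runs a CreateTableProcedure (pid=9 in this log)
        }
      }
    }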
2024-11-20T17:21:07,710 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:07,712 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T17:21:07,713 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732123267712"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732123267712"}]},"ts":"1732123267712"} 2024-11-20T17:21:07,716 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T17:21:07,717 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T17:21:07,717 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123267717"}]},"ts":"1732123267717"} 2024-11-20T17:21:07,720 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T17:21:07,725 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d436a1ae301ec26cf78d29bd05a18bd2, ASSIGN}] 2024-11-20T17:21:07,727 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d436a1ae301ec26cf78d29bd05a18bd2, ASSIGN 2024-11-20T17:21:07,728 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=d436a1ae301ec26cf78d29bd05a18bd2, ASSIGN; state=OFFLINE, location=d514dc944523,40121,1732123262111; forceNewPlan=false, retain=false 2024-11-20T17:21:07,879 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=d436a1ae301ec26cf78d29bd05a18bd2, regionState=OPENING, regionLocation=d514dc944523,40121,1732123262111 2024-11-20T17:21:07,883 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111}] 2024-11-20T17:21:07,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T17:21:08,036 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:08,042 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 
2024-11-20T17:21:08,042 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} 2024-11-20T17:21:08,043 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees d436a1ae301ec26cf78d29bd05a18bd2 2024-11-20T17:21:08,043 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:21:08,043 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for d436a1ae301ec26cf78d29bd05a18bd2 2024-11-20T17:21:08,043 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for d436a1ae301ec26cf78d29bd05a18bd2 2024-11-20T17:21:08,046 INFO [StoreOpener-d436a1ae301ec26cf78d29bd05a18bd2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region d436a1ae301ec26cf78d29bd05a18bd2 2024-11-20T17:21:08,049 INFO [StoreOpener-d436a1ae301ec26cf78d29bd05a18bd2-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:21:08,049 INFO [StoreOpener-d436a1ae301ec26cf78d29bd05a18bd2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d436a1ae301ec26cf78d29bd05a18bd2 columnFamilyName A 2024-11-20T17:21:08,049 DEBUG [StoreOpener-d436a1ae301ec26cf78d29bd05a18bd2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:08,050 INFO [StoreOpener-d436a1ae301ec26cf78d29bd05a18bd2-1 {}] regionserver.HStore(327): Store=d436a1ae301ec26cf78d29bd05a18bd2/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:21:08,050 INFO [StoreOpener-d436a1ae301ec26cf78d29bd05a18bd2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region d436a1ae301ec26cf78d29bd05a18bd2 2024-11-20T17:21:08,052 INFO [StoreOpener-d436a1ae301ec26cf78d29bd05a18bd2-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:21:08,052 INFO [StoreOpener-d436a1ae301ec26cf78d29bd05a18bd2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d436a1ae301ec26cf78d29bd05a18bd2 columnFamilyName B 2024-11-20T17:21:08,052 DEBUG [StoreOpener-d436a1ae301ec26cf78d29bd05a18bd2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:08,053 INFO [StoreOpener-d436a1ae301ec26cf78d29bd05a18bd2-1 {}] regionserver.HStore(327): Store=d436a1ae301ec26cf78d29bd05a18bd2/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:21:08,053 INFO [StoreOpener-d436a1ae301ec26cf78d29bd05a18bd2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region d436a1ae301ec26cf78d29bd05a18bd2 2024-11-20T17:21:08,055 INFO [StoreOpener-d436a1ae301ec26cf78d29bd05a18bd2-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:21:08,055 INFO [StoreOpener-d436a1ae301ec26cf78d29bd05a18bd2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d436a1ae301ec26cf78d29bd05a18bd2 columnFamilyName C 2024-11-20T17:21:08,055 DEBUG [StoreOpener-d436a1ae301ec26cf78d29bd05a18bd2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:08,056 INFO [StoreOpener-d436a1ae301ec26cf78d29bd05a18bd2-1 {}] regionserver.HStore(327): Store=d436a1ae301ec26cf78d29bd05a18bd2/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:21:08,056 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:08,058 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2 2024-11-20T17:21:08,058 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2 2024-11-20T17:21:08,061 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T17:21:08,063 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for d436a1ae301ec26cf78d29bd05a18bd2 2024-11-20T17:21:08,066 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T17:21:08,067 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened d436a1ae301ec26cf78d29bd05a18bd2; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59801436, jitterRate=-0.10888916254043579}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T17:21:08,068 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:08,070 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2., pid=11, masterSystemTime=1732123268036 2024-11-20T17:21:08,072 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:08,073 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 
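The region is now open with a CompactingMemStore behind each of the three stores, and the test's writers start filling them: the flush about a second later reports roughly 53.67 KB across the families. A minimal sketch of the kind of single-row write involved; the row key, qualifier and value are illustrative, not taken from the test:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteAllFamilies {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("row-0"));
          // One cell per family, so a single mutation touches A, B and C atomically.
          for (String family : new String[] {"A", "B", "C"}) {
            put.addColumn(Bytes.toBytes(family), Bytes.toBytes("q"), Bytes.toBytes("value"));
          }
          table.put(put);
        }
      }
    }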
2024-11-20T17:21:08,073 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=d436a1ae301ec26cf78d29bd05a18bd2, regionState=OPEN, openSeqNum=2, regionLocation=d514dc944523,40121,1732123262111 2024-11-20T17:21:08,079 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-11-20T17:21:08,079 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 in 194 msec 2024-11-20T17:21:08,082 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-11-20T17:21:08,082 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=d436a1ae301ec26cf78d29bd05a18bd2, ASSIGN in 354 msec 2024-11-20T17:21:08,083 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T17:21:08,084 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123268083"}]},"ts":"1732123268083"} 2024-11-20T17:21:08,086 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T17:21:08,090 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T17:21:08,092 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2200 sec 2024-11-20T17:21:08,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T17:21:09,000 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-11-20T17:21:09,005 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0e98ea32 to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3b9fcedf 2024-11-20T17:21:09,009 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e71e468, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:21:09,011 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:21:09,013 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47494, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:21:09,016 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T17:21:09,017 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50550, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T17:21:09,024 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x12885408 to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@9bd0964 2024-11-20T17:21:09,028 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c63ae4e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:21:09,029 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72b32f98 to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1324ee83 2024-11-20T17:21:09,033 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@736f1673, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:21:09,034 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x04977266 to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@45b55c24 2024-11-20T17:21:09,037 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ee2166f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:21:09,038 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6bbb5d8a to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@48068a5 2024-11-20T17:21:09,042 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f34ff67, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:21:09,043 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x18603bb9 to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3883f7b 2024-11-20T17:21:09,047 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b5f27aa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:21:09,048 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72e97e4b to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@12a1285d 2024-11-20T17:21:09,052 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c3b736e, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:21:09,053 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x490457fd to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@527c6d40 2024-11-20T17:21:09,057 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@353bc462, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:21:09,059 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c8de680 to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@47fe2fa7 2024-11-20T17:21:09,062 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6502d571, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:21:09,063 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6f6b07e3 to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@595e9ebe 2024-11-20T17:21:09,066 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a0471b9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:21:09,070 DEBUG [hconnection-0x48af454-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:21:09,070 DEBUG [hconnection-0x82c8c4d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:21:09,073 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47506, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:21:09,074 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47508, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:21:09,074 DEBUG [hconnection-0x17b3816e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:21:09,076 DEBUG [hconnection-0x40eb5aeb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:21:09,076 DEBUG [hconnection-0x5e621c7f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:21:09,076 DEBUG [hconnection-0x59c3390f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:21:09,077 DEBUG [hconnection-0x7b5d524b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ClientService, sasl=false 2024-11-20T17:21:09,077 DEBUG [hconnection-0x34e8847a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:21:09,077 DEBUG [hconnection-0x25557d7e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:21:09,079 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47512, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:21:09,079 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47516, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:21:09,079 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:21:09,080 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47528, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:21:09,081 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47536, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:21:09,084 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47542, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:21:09,085 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47556, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:21:09,087 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47566, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:21:09,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-11-20T17:21:09,093 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:21:09,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T17:21:09,095 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:21:09,097 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:21:09,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on d436a1ae301ec26cf78d29bd05a18bd2 2024-11-20T17:21:09,152 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d436a1ae301ec26cf78d29bd05a18bd2 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T17:21:09,167 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=A 2024-11-20T17:21:09,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:09,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=B 2024-11-20T17:21:09,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:09,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=C 2024-11-20T17:21:09,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:09,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T17:21:09,260 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:09,262 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T17:21:09,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:09,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. as already flushing 2024-11-20T17:21:09,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:09,272 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:21:09,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:09,285 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/0a9da142b993446c884a0e14f5327b5b is 50, key is test_row_0/A:col10/1732123269141/Put/seqid=0 2024-11-20T17:21:09,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:09,308 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:09,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123329277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:09,312 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:09,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47536 deadline: 1732123329279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:09,313 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:09,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123329284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:09,323 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:09,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47542 deadline: 1732123329305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:09,325 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:09,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47566 deadline: 1732123329309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:09,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741839_1015 (size=12001) 2024-11-20T17:21:09,329 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/0a9da142b993446c884a0e14f5327b5b 2024-11-20T17:21:09,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T17:21:09,451 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:09,452 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T17:21:09,453 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/7a65b29e11794baab57ec8ae20595449 is 50, key is test_row_0/B:col10/1732123269141/Put/seqid=0 2024-11-20T17:21:09,458 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:09,458 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:09,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47536 deadline: 1732123329452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:09,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47542 deadline: 1732123329452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:09,460 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:09,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47566 deadline: 1732123329453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:09,460 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:09,460 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:09,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123329454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:09,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123329454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:09,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:09,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. as already flushing 2024-11-20T17:21:09,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:09,465 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:09,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:09,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:09,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741840_1016 (size=12001) 2024-11-20T17:21:09,493 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/7a65b29e11794baab57ec8ae20595449 2024-11-20T17:21:09,551 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/77ff299657664e7eb936e034185276db is 50, key is test_row_0/C:col10/1732123269141/Put/seqid=0 2024-11-20T17:21:09,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741841_1017 (size=12001) 2024-11-20T17:21:09,620 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:09,621 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T17:21:09,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:09,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. as already flushing 2024-11-20T17:21:09,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:09,623 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:09,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:09,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:09,665 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:09,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47542 deadline: 1732123329664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:09,666 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:09,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47536 deadline: 1732123329665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:09,668 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:09,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123329666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:09,668 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:09,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47566 deadline: 1732123329667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:09,669 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:09,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123329667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:09,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T17:21:09,777 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:09,778 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T17:21:09,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:09,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. as already flushing 2024-11-20T17:21:09,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:09,778 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:09,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:09,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:09,884 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-20T17:21:09,887 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-20T17:21:09,888 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-11-20T17:21:09,932 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:09,933 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T17:21:09,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:09,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. as already flushing 2024-11-20T17:21:09,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 
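The RegionTooBusyException and "Unable to complete flush" entries above are two sides of the same back-pressure mechanism: HRegion.checkResources() rejects incoming Mutate calls once the region's memstore passes its blocking limit (here only 512.0 K, evidently a deliberately small test setting), while the FlushRegionCallable dispatched by the master keeps failing with "NOT flushing ... as already flushing" because the MemStoreFlusher is still draining that same memstore. From a client, the rejection is just a transient write failure. The sketch below shows one way a writer could tolerate it; the table, family, row and column names are taken from the log, but the class name, retry loop and constants are illustrative and not part of TestAcidGuarantees itself.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackpressureTolerantWriter {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Standard client-side retry knobs; the values here are illustrative.
        conf.setInt("hbase.client.retries.number", 10);
        conf.setLong("hbase.client.pause", 200);  // base pause between internal client retries, in ms

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                    .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            for (int attempt = 1; ; attempt++) {
                try {
                    // Under memstore pressure the server answers with RegionTooBusyException
                    // (an IOException); it may reach us directly or wrapped once the client's
                    // own retries are exhausted.
                    table.put(put);
                    break;
                } catch (IOException busy) {
                    if (attempt >= 5) throw busy;   // give up after a few extra rounds
                    Thread.sleep(200L * attempt);   // simple linear backoff before retrying
                }
            }
        }
    }
}

The blocking threshold itself is a server-side matter, typically derived from hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier; with the stock 128 MB flush size and 4x multiplier this condition is far rarer than in this intentionally constrained test.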
2024-11-20T17:21:09,934 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:09,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:09,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:09,974 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:09,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47542 deadline: 1732123329972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:09,975 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:09,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123329972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:09,976 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:09,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123329974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:09,976 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:09,976 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:09,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47566 deadline: 1732123329974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:09,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47536 deadline: 1732123329972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:09,981 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/77ff299657664e7eb936e034185276db 2024-11-20T17:21:09,994 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/0a9da142b993446c884a0e14f5327b5b as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/0a9da142b993446c884a0e14f5327b5b 2024-11-20T17:21:10,010 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/0a9da142b993446c884a0e14f5327b5b, entries=150, sequenceid=14, filesize=11.7 K 2024-11-20T17:21:10,018 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/7a65b29e11794baab57ec8ae20595449 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/7a65b29e11794baab57ec8ae20595449 2024-11-20T17:21:10,028 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/7a65b29e11794baab57ec8ae20595449, entries=150, sequenceid=14, filesize=11.7 K 2024-11-20T17:21:10,031 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/77ff299657664e7eb936e034185276db as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/77ff299657664e7eb936e034185276db 2024-11-20T17:21:10,043 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/77ff299657664e7eb936e034185276db, entries=150, sequenceid=14, filesize=11.7 K 2024-11-20T17:21:10,046 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for d436a1ae301ec26cf78d29bd05a18bd2 in 894ms, sequenceid=14, compaction requested=false 2024-11-20T17:21:10,047 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:10,087 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:10,088 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T17:21:10,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 
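The pid=12/pid=13 entries trace an administratively requested flush: the master's FlushTableProcedure (pid=12) spawns a FlushRegionProcedure (pid=13), which repeatedly sends a FlushRegionCallable to the region server. The first two dispatches fail with "Unable to complete flush" because the MemStoreFlusher is still writing, and the dispatch immediately above is the one that finally proceeds (pid=13 is reported successful a few entries further down). From the client side, that whole exchange is the kind of work a single Admin call requests; a minimal sketch, reusing the table name from the log but with an illustrative class name, looks like this:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Ask the cluster to flush every region of the table; server-side this is the
            // sort of request the FlushTableProcedure / FlushRegionCallable entries above service.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}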
2024-11-20T17:21:10,089 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing d436a1ae301ec26cf78d29bd05a18bd2 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T17:21:10,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=A 2024-11-20T17:21:10,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:10,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=B 2024-11-20T17:21:10,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:10,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=C 2024-11-20T17:21:10,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:10,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/88ec75f0fe07425bbc883f98f04b0d82 is 50, key is test_row_0/A:col10/1732123269278/Put/seqid=0 2024-11-20T17:21:10,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741842_1018 (size=12001) 2024-11-20T17:21:10,128 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/88ec75f0fe07425bbc883f98f04b0d82 2024-11-20T17:21:10,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/a32eec7457e7479f8b6ecaa5172a56f8 is 50, key is test_row_0/B:col10/1732123269278/Put/seqid=0 2024-11-20T17:21:10,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741843_1019 (size=12001) 2024-11-20T17:21:10,184 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=37 (bloomFilter=true), 
to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/a32eec7457e7479f8b6ecaa5172a56f8 2024-11-20T17:21:10,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T17:21:10,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/354a8aadb9a94e79ab23b3452dfb2368 is 50, key is test_row_0/C:col10/1732123269278/Put/seqid=0 2024-11-20T17:21:10,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741844_1020 (size=12001) 2024-11-20T17:21:10,229 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/354a8aadb9a94e79ab23b3452dfb2368 2024-11-20T17:21:10,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/88ec75f0fe07425bbc883f98f04b0d82 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/88ec75f0fe07425bbc883f98f04b0d82 2024-11-20T17:21:10,264 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/88ec75f0fe07425bbc883f98f04b0d82, entries=150, sequenceid=37, filesize=11.7 K 2024-11-20T17:21:10,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/a32eec7457e7479f8b6ecaa5172a56f8 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/a32eec7457e7479f8b6ecaa5172a56f8 2024-11-20T17:21:10,286 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/a32eec7457e7479f8b6ecaa5172a56f8, entries=150, sequenceid=37, filesize=11.7 K 2024-11-20T17:21:10,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/354a8aadb9a94e79ab23b3452dfb2368 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/354a8aadb9a94e79ab23b3452dfb2368 2024-11-20T17:21:10,311 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/354a8aadb9a94e79ab23b3452dfb2368, entries=150, sequenceid=37, filesize=11.7 K 2024-11-20T17:21:10,314 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=0 B/0 for d436a1ae301ec26cf78d29bd05a18bd2 in 226ms, sequenceid=37, compaction requested=false 2024-11-20T17:21:10,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:10,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:10,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-20T17:21:10,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-11-20T17:21:10,321 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-11-20T17:21:10,321 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2200 sec 2024-11-20T17:21:10,324 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 1.2410 sec 2024-11-20T17:21:10,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on d436a1ae301ec26cf78d29bd05a18bd2 2024-11-20T17:21:10,514 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d436a1ae301ec26cf78d29bd05a18bd2 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T17:21:10,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=A 2024-11-20T17:21:10,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:10,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=B 2024-11-20T17:21:10,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:10,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, 
store=C 2024-11-20T17:21:10,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:10,528 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/a799a4d87fd8445a81fe48a9f7d24b59 is 50, key is test_row_0/A:col10/1732123270510/Put/seqid=0 2024-11-20T17:21:10,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741845_1021 (size=19021) 2024-11-20T17:21:10,551 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/a799a4d87fd8445a81fe48a9f7d24b59 2024-11-20T17:21:10,574 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/80abe4187c2f4b8eaf86e86cdd3e5f5e is 50, key is test_row_0/B:col10/1732123270510/Put/seqid=0 2024-11-20T17:21:10,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741846_1022 (size=12001) 2024-11-20T17:21:10,601 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/80abe4187c2f4b8eaf86e86cdd3e5f5e 2024-11-20T17:21:10,610 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:10,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123330595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:10,612 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:10,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123330601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:10,613 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:10,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47536 deadline: 1732123330604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:10,628 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:10,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47566 deadline: 1732123330611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:10,630 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:10,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47542 deadline: 1732123330616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:10,632 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/2e2694992d274051ad4cd3df0ffd2122 is 50, key is test_row_0/C:col10/1732123270510/Put/seqid=0 2024-11-20T17:21:10,638 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T17:21:10,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741847_1023 (size=12001) 2024-11-20T17:21:10,651 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/2e2694992d274051ad4cd3df0ffd2122 2024-11-20T17:21:10,664 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/a799a4d87fd8445a81fe48a9f7d24b59 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/a799a4d87fd8445a81fe48a9f7d24b59 2024-11-20T17:21:10,677 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/a799a4d87fd8445a81fe48a9f7d24b59, entries=300, 
sequenceid=49, filesize=18.6 K 2024-11-20T17:21:10,679 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/80abe4187c2f4b8eaf86e86cdd3e5f5e as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/80abe4187c2f4b8eaf86e86cdd3e5f5e 2024-11-20T17:21:10,696 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/80abe4187c2f4b8eaf86e86cdd3e5f5e, entries=150, sequenceid=49, filesize=11.7 K 2024-11-20T17:21:10,709 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/2e2694992d274051ad4cd3df0ffd2122 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/2e2694992d274051ad4cd3df0ffd2122 2024-11-20T17:21:10,719 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:10,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123330714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:10,720 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:10,721 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/2e2694992d274051ad4cd3df0ffd2122, entries=150, sequenceid=49, filesize=11.7 K 2024-11-20T17:21:10,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123330715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:10,722 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for d436a1ae301ec26cf78d29bd05a18bd2 in 208ms, sequenceid=49, compaction requested=true 2024-11-20T17:21:10,723 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:10,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on d436a1ae301ec26cf78d29bd05a18bd2 2024-11-20T17:21:10,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:21:10,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:10,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:21:10,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:10,728 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:21:10,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:21:10,728 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:21:10,729 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d436a1ae301ec26cf78d29bd05a18bd2 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T17:21:10,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=A 2024-11-20T17:21:10,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:10,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=B 2024-11-20T17:21:10,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:10,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=C 2024-11-20T17:21:10,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:10,731 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:21:10,733 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:21:10,735 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): d436a1ae301ec26cf78d29bd05a18bd2/B is initiating minor compaction (all files) 2024-11-20T17:21:10,735 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d436a1ae301ec26cf78d29bd05a18bd2/B in TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 
2024-11-20T17:21:10,735 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/7a65b29e11794baab57ec8ae20595449, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/a32eec7457e7479f8b6ecaa5172a56f8, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/80abe4187c2f4b8eaf86e86cdd3e5f5e] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp, totalSize=35.2 K 2024-11-20T17:21:10,737 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 7a65b29e11794baab57ec8ae20595449, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1732123269141 2024-11-20T17:21:10,738 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting a32eec7457e7479f8b6ecaa5172a56f8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732123269278 2024-11-20T17:21:10,739 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 80abe4187c2f4b8eaf86e86cdd3e5f5e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732123270504 2024-11-20T17:21:10,741 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 43023 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:21:10,741 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): d436a1ae301ec26cf78d29bd05a18bd2/A is initiating minor compaction (all files) 2024-11-20T17:21:10,741 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d436a1ae301ec26cf78d29bd05a18bd2/A in TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 
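At this point each column family has three store files on disk (sequence ids 14, 37 and 49), so the flusher marks A, B and C for compaction and the ExploringCompactionPolicy selects all three eligible files for B (about 35.2 K, above) and for A (about 42.0 K, below), both minor compactions and well under the 16-file blocking limit. The selection is driven by ordinary store-level settings; the sketch below simply names those keys with their stock defaults rather than whatever this test configures, and setting them on a client Configuration is only a convenient way to show them, since in practice they take effect in the region server's hbase-site.xml.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of eligible files before a minor compaction is considered.
        conf.setInt("hbase.hstore.compaction.min", 3);
        // Upper bound on how many files one minor compaction will merge.
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Roughly: a file stays eligible while it is no larger than ratio x the sum of the
        // smaller files considered alongside it.
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        // Writes to a store block once it accumulates this many files ("16 blocking" in the log).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);

        System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
    }
}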
2024-11-20T17:21:10,742 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/0a9da142b993446c884a0e14f5327b5b, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/88ec75f0fe07425bbc883f98f04b0d82, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/a799a4d87fd8445a81fe48a9f7d24b59] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp, totalSize=42.0 K 2024-11-20T17:21:10,743 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0a9da142b993446c884a0e14f5327b5b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1732123269141 2024-11-20T17:21:10,744 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 88ec75f0fe07425bbc883f98f04b0d82, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732123269278 2024-11-20T17:21:10,745 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting a799a4d87fd8445a81fe48a9f7d24b59, keycount=300, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732123270493 2024-11-20T17:21:10,752 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/247267beb1e44fcd99b9c3161a9a5ec5 is 50, key is test_row_0/A:col10/1732123270722/Put/seqid=0 2024-11-20T17:21:10,768 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:10,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47536 deadline: 1732123330759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:10,777 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:10,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47566 deadline: 1732123330766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:10,777 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:10,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47542 deadline: 1732123330768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:10,782 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d436a1ae301ec26cf78d29bd05a18bd2#B#compaction#10 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:10,783 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/f14c0c28cfb74c9fb783971b2dc3f28e is 50, key is test_row_0/B:col10/1732123270510/Put/seqid=0 2024-11-20T17:21:10,798 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d436a1ae301ec26cf78d29bd05a18bd2#A#compaction#11 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:10,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741848_1024 (size=14341) 2024-11-20T17:21:10,800 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/247267beb1e44fcd99b9c3161a9a5ec5 2024-11-20T17:21:10,801 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/fb271bb0e5fb47d8bec53a33ae42b947 is 50, key is test_row_0/A:col10/1732123270510/Put/seqid=0 2024-11-20T17:21:10,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741849_1025 (size=12104) 2024-11-20T17:21:10,825 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/f14c0c28cfb74c9fb783971b2dc3f28e as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/f14c0c28cfb74c9fb783971b2dc3f28e 2024-11-20T17:21:10,841 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/6536699df0a64897886d61816d14673b is 50, key is test_row_0/B:col10/1732123270722/Put/seqid=0 2024-11-20T17:21:10,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741850_1026 (size=12104) 2024-11-20T17:21:10,851 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d436a1ae301ec26cf78d29bd05a18bd2/B of d436a1ae301ec26cf78d29bd05a18bd2 into f14c0c28cfb74c9fb783971b2dc3f28e(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:21:10,852 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:10,852 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2., storeName=d436a1ae301ec26cf78d29bd05a18bd2/B, priority=13, startTime=1732123270728; duration=0sec 2024-11-20T17:21:10,852 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:21:10,853 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:B 2024-11-20T17:21:10,853 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:21:10,861 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:21:10,861 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): d436a1ae301ec26cf78d29bd05a18bd2/C is initiating minor compaction (all files) 2024-11-20T17:21:10,861 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d436a1ae301ec26cf78d29bd05a18bd2/C in TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:10,862 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/77ff299657664e7eb936e034185276db, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/354a8aadb9a94e79ab23b3452dfb2368, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/2e2694992d274051ad4cd3df0ffd2122] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp, totalSize=35.2 K 2024-11-20T17:21:10,865 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 77ff299657664e7eb936e034185276db, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1732123269141 2024-11-20T17:21:10,868 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 354a8aadb9a94e79ab23b3452dfb2368, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732123269278 2024-11-20T17:21:10,869 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/fb271bb0e5fb47d8bec53a33ae42b947 as 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/fb271bb0e5fb47d8bec53a33ae42b947 2024-11-20T17:21:10,869 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e2694992d274051ad4cd3df0ffd2122, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732123270504 2024-11-20T17:21:10,877 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:10,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47536 deadline: 1732123330871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:10,883 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:10,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47566 deadline: 1732123330881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:10,885 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:10,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47542 deadline: 1732123330881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:10,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741851_1027 (size=12001) 2024-11-20T17:21:10,893 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/6536699df0a64897886d61816d14673b 2024-11-20T17:21:10,897 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d436a1ae301ec26cf78d29bd05a18bd2/A of d436a1ae301ec26cf78d29bd05a18bd2 into fb271bb0e5fb47d8bec53a33ae42b947(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:21:10,897 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:10,897 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2., storeName=d436a1ae301ec26cf78d29bd05a18bd2/A, priority=13, startTime=1732123270724; duration=0sec 2024-11-20T17:21:10,897 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:10,897 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:A 2024-11-20T17:21:10,899 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d436a1ae301ec26cf78d29bd05a18bd2#C#compaction#13 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:10,901 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/5e0d71e3c6d14e57b673769ad705ab97 is 50, key is test_row_0/C:col10/1732123270510/Put/seqid=0 2024-11-20T17:21:10,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741852_1028 (size=12104) 2024-11-20T17:21:10,912 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/37976975e66742d7b85bea9c0dbefb95 is 50, key is test_row_0/C:col10/1732123270722/Put/seqid=0 2024-11-20T17:21:10,925 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:10,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741853_1029 (size=12001) 2024-11-20T17:21:10,928 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/37976975e66742d7b85bea9c0dbefb95 2024-11-20T17:21:10,930 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:10,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123330923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:10,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123330925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:10,930 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/5e0d71e3c6d14e57b673769ad705ab97 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/5e0d71e3c6d14e57b673769ad705ab97 2024-11-20T17:21:10,947 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d436a1ae301ec26cf78d29bd05a18bd2/C of d436a1ae301ec26cf78d29bd05a18bd2 into 5e0d71e3c6d14e57b673769ad705ab97(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:21:10,947 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:10,947 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2., storeName=d436a1ae301ec26cf78d29bd05a18bd2/C, priority=13, startTime=1732123270728; duration=0sec 2024-11-20T17:21:10,947 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:10,947 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:C 2024-11-20T17:21:10,952 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/247267beb1e44fcd99b9c3161a9a5ec5 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/247267beb1e44fcd99b9c3161a9a5ec5 2024-11-20T17:21:10,969 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/247267beb1e44fcd99b9c3161a9a5ec5, entries=200, sequenceid=75, filesize=14.0 K 2024-11-20T17:21:10,971 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/6536699df0a64897886d61816d14673b as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/6536699df0a64897886d61816d14673b 2024-11-20T17:21:10,985 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/6536699df0a64897886d61816d14673b, entries=150, sequenceid=75, filesize=11.7 K 2024-11-20T17:21:10,989 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/37976975e66742d7b85bea9c0dbefb95 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/37976975e66742d7b85bea9c0dbefb95 2024-11-20T17:21:11,008 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/37976975e66742d7b85bea9c0dbefb95, entries=150, sequenceid=75, filesize=11.7 K 2024-11-20T17:21:11,010 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for d436a1ae301ec26cf78d29bd05a18bd2 in 282ms, sequenceid=75, 
compaction requested=false 2024-11-20T17:21:11,010 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:11,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on d436a1ae301ec26cf78d29bd05a18bd2 2024-11-20T17:21:11,086 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d436a1ae301ec26cf78d29bd05a18bd2 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T17:21:11,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=A 2024-11-20T17:21:11,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:11,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=B 2024-11-20T17:21:11,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:11,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=C 2024-11-20T17:21:11,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:11,097 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/02ce090b4cf84c79876b8658cadcd446 is 50, key is test_row_0/A:col10/1732123271082/Put/seqid=0 2024-11-20T17:21:11,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741854_1030 (size=14341) 2024-11-20T17:21:11,194 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:11,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47536 deadline: 1732123331190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:11,196 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:11,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47542 deadline: 1732123331194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:11,197 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:11,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47566 deadline: 1732123331194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:11,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T17:21:11,206 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-11-20T17:21:11,211 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:21:11,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-11-20T17:21:11,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T17:21:11,215 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:21:11,217 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:21:11,217 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:21:11,236 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:11,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123331234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:11,239 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:11,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123331235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:11,299 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:11,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47536 deadline: 1732123331298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:11,301 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:11,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47566 deadline: 1732123331300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:11,302 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:11,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47542 deadline: 1732123331301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:11,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T17:21:11,372 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:11,374 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-20T17:21:11,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:11,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. as already flushing 2024-11-20T17:21:11,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:11,375 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:21:11,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:11,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:11,504 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:11,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47536 deadline: 1732123331503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:11,506 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:11,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47542 deadline: 1732123331504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:11,507 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:11,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47566 deadline: 1732123331505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:11,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T17:21:11,528 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/02ce090b4cf84c79876b8658cadcd446 2024-11-20T17:21:11,529 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:11,529 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-20T17:21:11,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:11,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. as already flushing 2024-11-20T17:21:11,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:11,531 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:11,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:11,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:11,550 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/aed7419f0e054fb7a9df55a0b797c4ae is 50, key is test_row_0/B:col10/1732123271082/Put/seqid=0 2024-11-20T17:21:11,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741855_1031 (size=12001) 2024-11-20T17:21:11,569 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/aed7419f0e054fb7a9df55a0b797c4ae 2024-11-20T17:21:11,588 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/9e641872f30d4c47bddab4c53952a351 is 50, key is test_row_0/C:col10/1732123271082/Put/seqid=0 2024-11-20T17:21:11,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741856_1032 (size=12001) 2024-11-20T17:21:11,615 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/9e641872f30d4c47bddab4c53952a351 2024-11-20T17:21:11,628 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/02ce090b4cf84c79876b8658cadcd446 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/02ce090b4cf84c79876b8658cadcd446 2024-11-20T17:21:11,643 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/02ce090b4cf84c79876b8658cadcd446, entries=200, sequenceid=90, filesize=14.0 K 2024-11-20T17:21:11,645 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/aed7419f0e054fb7a9df55a0b797c4ae 
as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/aed7419f0e054fb7a9df55a0b797c4ae 2024-11-20T17:21:11,662 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/aed7419f0e054fb7a9df55a0b797c4ae, entries=150, sequenceid=90, filesize=11.7 K 2024-11-20T17:21:11,676 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/9e641872f30d4c47bddab4c53952a351 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/9e641872f30d4c47bddab4c53952a351 2024-11-20T17:21:11,686 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:11,687 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-20T17:21:11,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:11,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. as already flushing 2024-11-20T17:21:11,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:11,688 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:21:11,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:11,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:11,690 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/9e641872f30d4c47bddab4c53952a351, entries=150, sequenceid=90, filesize=11.7 K 2024-11-20T17:21:11,694 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for d436a1ae301ec26cf78d29bd05a18bd2 in 609ms, sequenceid=90, compaction requested=true 2024-11-20T17:21:11,694 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:11,694 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:21:11,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:21:11,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:11,695 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:21:11,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:21:11,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:11,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:21:11,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:21:11,698 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40786 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:21:11,698 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): d436a1ae301ec26cf78d29bd05a18bd2/A is initiating minor compaction (all files) 2024-11-20T17:21:11,698 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d436a1ae301ec26cf78d29bd05a18bd2/A in TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 
2024-11-20T17:21:11,698 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/fb271bb0e5fb47d8bec53a33ae42b947, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/247267beb1e44fcd99b9c3161a9a5ec5, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/02ce090b4cf84c79876b8658cadcd446] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp, totalSize=39.8 K 2024-11-20T17:21:11,699 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting fb271bb0e5fb47d8bec53a33ae42b947, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732123270504 2024-11-20T17:21:11,701 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:21:11,701 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): d436a1ae301ec26cf78d29bd05a18bd2/B is initiating minor compaction (all files) 2024-11-20T17:21:11,701 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d436a1ae301ec26cf78d29bd05a18bd2/B in TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 
2024-11-20T17:21:11,701 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 247267beb1e44fcd99b9c3161a9a5ec5, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732123270600 2024-11-20T17:21:11,701 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/f14c0c28cfb74c9fb783971b2dc3f28e, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/6536699df0a64897886d61816d14673b, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/aed7419f0e054fb7a9df55a0b797c4ae] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp, totalSize=35.3 K 2024-11-20T17:21:11,702 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 02ce090b4cf84c79876b8658cadcd446, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732123270735 2024-11-20T17:21:11,702 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting f14c0c28cfb74c9fb783971b2dc3f28e, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732123270504 2024-11-20T17:21:11,705 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 6536699df0a64897886d61816d14673b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732123270600 2024-11-20T17:21:11,707 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting aed7419f0e054fb7a9df55a0b797c4ae, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732123270735 2024-11-20T17:21:11,729 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d436a1ae301ec26cf78d29bd05a18bd2#B#compaction#18 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:11,730 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/5fce7c2a1dcf4289bdfa16066b0b3fb2 is 50, key is test_row_0/B:col10/1732123271082/Put/seqid=0 2024-11-20T17:21:11,731 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d436a1ae301ec26cf78d29bd05a18bd2#A#compaction#19 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:11,732 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/44e69a6c5d834051a45ff6ab36594997 is 50, key is test_row_0/A:col10/1732123271082/Put/seqid=0 2024-11-20T17:21:11,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on d436a1ae301ec26cf78d29bd05a18bd2 2024-11-20T17:21:11,748 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d436a1ae301ec26cf78d29bd05a18bd2 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T17:21:11,748 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=A 2024-11-20T17:21:11,748 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:11,749 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=B 2024-11-20T17:21:11,749 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:11,749 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=C 2024-11-20T17:21:11,749 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:11,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741858_1034 (size=12207) 2024-11-20T17:21:11,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741857_1033 (size=12207) 2024-11-20T17:21:11,762 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/3be21fc0281c4d85ae32ca41ae61eb7e is 50, key is test_row_0/A:col10/1732123271178/Put/seqid=0 2024-11-20T17:21:11,778 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:11,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123331774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:11,779 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:11,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123331776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:11,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741859_1035 (size=12001) 2024-11-20T17:21:11,785 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/3be21fc0281c4d85ae32ca41ae61eb7e 2024-11-20T17:21:11,805 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/884d3aeda8c54667955d64526febbafc is 50, key is test_row_0/B:col10/1732123271178/Put/seqid=0 2024-11-20T17:21:11,810 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:11,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47536 deadline: 1732123331808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:11,811 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:11,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47542 deadline: 1732123331809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:11,813 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:11,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47566 deadline: 1732123331811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:11,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741860_1036 (size=12001) 2024-11-20T17:21:11,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T17:21:11,828 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/884d3aeda8c54667955d64526febbafc 2024-11-20T17:21:11,842 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:11,843 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-20T17:21:11,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:11,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. as already flushing 2024-11-20T17:21:11,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 
2024-11-20T17:21:11,843 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:11,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:11,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:11,851 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/bab6ddf4fe0847f28d579fae3991663e is 50, key is test_row_0/C:col10/1732123271178/Put/seqid=0 2024-11-20T17:21:11,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741861_1037 (size=12001) 2024-11-20T17:21:11,862 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/bab6ddf4fe0847f28d579fae3991663e 2024-11-20T17:21:11,873 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-20T17:21:11,874 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-20T17:21:11,876 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-11-20T17:21:11,876 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-11-20T17:21:11,878 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: 
RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T17:21:11,878 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-20T17:21:11,878 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-20T17:21:11,878 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-20T17:21:11,879 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-20T17:21:11,880 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-20T17:21:11,883 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/3be21fc0281c4d85ae32ca41ae61eb7e as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/3be21fc0281c4d85ae32ca41ae61eb7e 2024-11-20T17:21:11,883 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:11,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123331882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:11,885 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:11,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123331884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:11,894 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/3be21fc0281c4d85ae32ca41ae61eb7e, entries=150, sequenceid=116, filesize=11.7 K 2024-11-20T17:21:11,896 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/884d3aeda8c54667955d64526febbafc as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/884d3aeda8c54667955d64526febbafc 2024-11-20T17:21:11,903 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/884d3aeda8c54667955d64526febbafc, entries=150, sequenceid=116, filesize=11.7 K 2024-11-20T17:21:11,905 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/bab6ddf4fe0847f28d579fae3991663e as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/bab6ddf4fe0847f28d579fae3991663e 2024-11-20T17:21:11,914 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/bab6ddf4fe0847f28d579fae3991663e, entries=150, sequenceid=116, filesize=11.7 K 2024-11-20T17:21:11,916 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for d436a1ae301ec26cf78d29bd05a18bd2 in 168ms, sequenceid=116, compaction requested=true 2024-11-20T17:21:11,916 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:11,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:A, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:21:11,917 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T17:21:11,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:B, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:21:11,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-20T17:21:11,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:21:11,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-11-20T17:21:11,997 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:11,998 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-20T17:21:11,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:11,999 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing d436a1ae301ec26cf78d29bd05a18bd2 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-20T17:21:11,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=A 2024-11-20T17:21:12,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:12,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=B 2024-11-20T17:21:12,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:12,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=C 2024-11-20T17:21:12,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:12,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/c19359efc8ac44d9be70d9e5671ce69c is 50, key is test_row_0/A:col10/1732123271750/Put/seqid=0 2024-11-20T17:21:12,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741862_1038 (size=12001) 2024-11-20T17:21:12,028 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=126 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/c19359efc8ac44d9be70d9e5671ce69c 2024-11-20T17:21:12,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/2f36e6eea3474788a53990a3c718a789 is 50, key is test_row_0/B:col10/1732123271750/Put/seqid=0 2024-11-20T17:21:12,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741863_1039 (size=12001) 2024-11-20T17:21:12,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on d436a1ae301ec26cf78d29bd05a18bd2 2024-11-20T17:21:12,092 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. as already flushing 2024-11-20T17:21:12,160 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:12,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123332156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:12,161 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:12,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123332156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:12,185 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/5fce7c2a1dcf4289bdfa16066b0b3fb2 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/5fce7c2a1dcf4289bdfa16066b0b3fb2 2024-11-20T17:21:12,186 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/44e69a6c5d834051a45ff6ab36594997 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/44e69a6c5d834051a45ff6ab36594997 2024-11-20T17:21:12,204 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d436a1ae301ec26cf78d29bd05a18bd2/B of d436a1ae301ec26cf78d29bd05a18bd2 into 5fce7c2a1dcf4289bdfa16066b0b3fb2(size=11.9 K), total size for store is 23.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
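The "Committing .../.tmp/... as .../<family>/..." entries above show the write-then-publish pattern used for new store files: the file is first written under the region's .tmp directory and only then moved into the column-family directory, so readers never observe a partially written HFile. A minimal, generic sketch of that pattern against the Hadoop FileSystem API follows; the paths and payload are illustrative and this is not the exact HBase code path.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpThenCommitSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();      // picks up core-site.xml / hdfs-site.xml
        FileSystem fs = FileSystem.get(conf);

        // Illustrative paths: write the new file under .tmp first ...
        Path tmpFile = new Path("/data/default/ExampleTable/region/.tmp/B/newfile");
        Path finalFile = new Path("/data/default/ExampleTable/region/B/newfile");

        try (FSDataOutputStream out = fs.create(tmpFile)) {
          out.writeBytes("payload");                   // stand-in for the real HFile contents
        }

        // ... then publish it with a rename into the store directory,
        // so the file appears under the family directory only once it is complete.
        if (!fs.rename(tmpFile, finalFile)) {
          throw new java.io.IOException("commit failed for " + finalFile);
        }
      }
    }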
2024-11-20T17:21:12,204 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:12,204 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2., storeName=d436a1ae301ec26cf78d29bd05a18bd2/B, priority=13, startTime=1732123271695; duration=0sec 2024-11-20T17:21:12,204 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-11-20T17:21:12,204 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:B 2024-11-20T17:21:12,205 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 3 compacting, 1 eligible, 16 blocking 2024-11-20T17:21:12,206 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-20T17:21:12,206 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-20T17:21:12,206 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. because compaction request was cancelled 2024-11-20T17:21:12,207 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:A 2024-11-20T17:21:12,207 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:21:12,209 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d436a1ae301ec26cf78d29bd05a18bd2/A of d436a1ae301ec26cf78d29bd05a18bd2 into 44e69a6c5d834051a45ff6ab36594997(size=11.9 K), total size for store is 23.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
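The compaction-selection figures in the entries above ("Need 3 to initiate", "16 blocking") and the recurring "Over memstore limit=512.0 K" rejections correspond to standard HBase tuning properties. The sketch below lists the usual defaults purely to indicate which knobs are involved; this test intentionally runs with a very small flush size, which is why its blocking limit is only 512 K rather than the default flush.size times block.multiplier.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RegionTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Minimum number of eligible store files before a minor compaction starts
        // ("Not compacting files ... Need 3 to initiate").
        conf.setInt("hbase.hstore.compactionThreshold", 3);

        // Store-file count at which further flushes are blocked ("16 blocking").
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);

        // Memstore flush size and the multiplier that defines the blocking limit;
        // writes are rejected with RegionTooBusyException once the region's
        // memstore exceeds flush.size * block.multiplier.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        System.out.println("compactionThreshold = "
            + conf.getInt("hbase.hstore.compactionThreshold", -1));
      }
    }

In general, raising the blocking store-file count or the memstore limits trades extra heap and read amplification for fewer blocked writes during bursts like the one in this run.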
2024-11-20T17:21:12,209 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:12,209 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2., storeName=d436a1ae301ec26cf78d29bd05a18bd2/A, priority=13, startTime=1732123271694; duration=0sec 2024-11-20T17:21:12,209 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T17:21:12,209 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:A 2024-11-20T17:21:12,209 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:B 2024-11-20T17:21:12,210 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:21:12,210 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): d436a1ae301ec26cf78d29bd05a18bd2/C is initiating minor compaction (all files) 2024-11-20T17:21:12,210 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 4 compacting, 0 eligible, 16 blocking 2024-11-20T17:21:12,211 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-20T17:21:12,211 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d436a1ae301ec26cf78d29bd05a18bd2/C in TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:12,211 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-20T17:21:12,211 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 
because compaction request was cancelled 2024-11-20T17:21:12,211 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:C 2024-11-20T17:21:12,211 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-20T17:21:12,211 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/5e0d71e3c6d14e57b673769ad705ab97, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/37976975e66742d7b85bea9c0dbefb95, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/9e641872f30d4c47bddab4c53952a351, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/bab6ddf4fe0847f28d579fae3991663e] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp, totalSize=47.0 K 2024-11-20T17:21:12,212 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 5e0d71e3c6d14e57b673769ad705ab97, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732123270504 2024-11-20T17:21:12,213 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-20T17:21:12,213 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-20T17:21:12,213 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 
because compaction request was cancelled 2024-11-20T17:21:12,213 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 37976975e66742d7b85bea9c0dbefb95, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732123270600 2024-11-20T17:21:12,213 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:B 2024-11-20T17:21:12,214 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 9e641872f30d4c47bddab4c53952a351, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732123270735 2024-11-20T17:21:12,214 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting bab6ddf4fe0847f28d579fae3991663e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732123271178 2024-11-20T17:21:12,243 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d436a1ae301ec26cf78d29bd05a18bd2#C#compaction#25 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:12,246 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/19c4c3b1863141d0bc5507a17dc8ea30 is 50, key is test_row_0/C:col10/1732123271178/Put/seqid=0 2024-11-20T17:21:12,266 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:12,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123332264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:12,267 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:12,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123332265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:12,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741864_1040 (size=12241) 2024-11-20T17:21:12,292 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/19c4c3b1863141d0bc5507a17dc8ea30 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/19c4c3b1863141d0bc5507a17dc8ea30 2024-11-20T17:21:12,303 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d436a1ae301ec26cf78d29bd05a18bd2/C of d436a1ae301ec26cf78d29bd05a18bd2 into 19c4c3b1863141d0bc5507a17dc8ea30(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:21:12,303 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:12,303 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2., storeName=d436a1ae301ec26cf78d29bd05a18bd2/C, priority=12, startTime=1732123271917; duration=0sec 2024-11-20T17:21:12,303 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:12,303 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:C 2024-11-20T17:21:12,316 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:12,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47536 deadline: 1732123332315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:12,320 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:12,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47542 deadline: 1732123332314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:12,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T17:21:12,322 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:12,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47566 deadline: 1732123332318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:12,472 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:12,473 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=126 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/2f36e6eea3474788a53990a3c718a789 2024-11-20T17:21:12,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123332469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:12,473 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:12,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123332470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:12,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/6dbac4fac49c47ea8dfffa352c36149c is 50, key is test_row_0/C:col10/1732123271750/Put/seqid=0 2024-11-20T17:21:12,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741865_1041 (size=12001) 2024-11-20T17:21:12,776 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:12,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123332775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:12,780 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:12,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123332777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:12,929 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=126 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/6dbac4fac49c47ea8dfffa352c36149c 2024-11-20T17:21:12,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/c19359efc8ac44d9be70d9e5671ce69c as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/c19359efc8ac44d9be70d9e5671ce69c 2024-11-20T17:21:12,959 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/c19359efc8ac44d9be70d9e5671ce69c, entries=150, sequenceid=126, filesize=11.7 K 2024-11-20T17:21:12,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/2f36e6eea3474788a53990a3c718a789 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/2f36e6eea3474788a53990a3c718a789 2024-11-20T17:21:12,972 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/2f36e6eea3474788a53990a3c718a789, entries=150, sequenceid=126, filesize=11.7 K 2024-11-20T17:21:12,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.StoreScanner(1000): StoreScanner already closing. 
There is no need to updateReaders 2024-11-20T17:21:12,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/6dbac4fac49c47ea8dfffa352c36149c as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/6dbac4fac49c47ea8dfffa352c36149c 2024-11-20T17:21:12,985 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/6dbac4fac49c47ea8dfffa352c36149c, entries=150, sequenceid=126, filesize=11.7 K 2024-11-20T17:21:12,988 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=161.02 KB/164880 for d436a1ae301ec26cf78d29bd05a18bd2 in 990ms, sequenceid=126, compaction requested=true 2024-11-20T17:21:12,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:12,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:12,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-11-20T17:21:12,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-11-20T17:21:12,994 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-11-20T17:21:12,994 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7730 sec 2024-11-20T17:21:12,999 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 1.7840 sec 2024-11-20T17:21:13,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on d436a1ae301ec26cf78d29bd05a18bd2 2024-11-20T17:21:13,286 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d436a1ae301ec26cf78d29bd05a18bd2 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-20T17:21:13,286 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=A 2024-11-20T17:21:13,286 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:13,286 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=B 2024-11-20T17:21:13,287 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:13,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=C 2024-11-20T17:21:13,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:13,300 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/82401d6cfa4b47948d317c76c28a6e71 is 50, key is test_row_0/A:col10/1732123272147/Put/seqid=0 2024-11-20T17:21:13,305 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:13,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123333303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:13,307 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:13,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123333305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:13,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T17:21:13,324 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-11-20T17:21:13,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741866_1042 (size=12151) 2024-11-20T17:21:13,327 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:13,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47536 deadline: 1732123333326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:13,328 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:21:13,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-11-20T17:21:13,331 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:13,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47542 deadline: 1732123333329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:13,332 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:21:13,332 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:13,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47566 deadline: 1732123333329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:13,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T17:21:13,333 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:21:13,333 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:21:13,341 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/82401d6cfa4b47948d317c76c28a6e71 2024-11-20T17:21:13,366 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/245cadb77dc34dc08bd806d7c359c6e7 is 50, key is test_row_0/B:col10/1732123272147/Put/seqid=0 2024-11-20T17:21:13,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741867_1043 (size=12151) 2024-11-20T17:21:13,409 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:13,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123333408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:13,412 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:13,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123333409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:13,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T17:21:13,486 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:13,486 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T17:21:13,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:13,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. as already flushing 2024-11-20T17:21:13,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:13,487 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:21:13,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:13,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:13,613 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:13,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123333612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:13,617 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:13,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123333615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:13,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T17:21:13,640 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:13,642 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T17:21:13,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:13,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. as already flushing 2024-11-20T17:21:13,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:13,642 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:21:13,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:13,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:13,791 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/245cadb77dc34dc08bd806d7c359c6e7 2024-11-20T17:21:13,796 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:13,797 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T17:21:13,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:13,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. as already flushing 2024-11-20T17:21:13,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:13,799 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:13,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:13,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:21:13,811 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/3d57f0011fe14077b58502f91aeb5db4 is 50, key is test_row_0/C:col10/1732123272147/Put/seqid=0 2024-11-20T17:21:13,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741868_1044 (size=12151) 2024-11-20T17:21:13,840 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/3d57f0011fe14077b58502f91aeb5db4 2024-11-20T17:21:13,853 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/82401d6cfa4b47948d317c76c28a6e71 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/82401d6cfa4b47948d317c76c28a6e71 2024-11-20T17:21:13,864 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/82401d6cfa4b47948d317c76c28a6e71, entries=150, sequenceid=157, filesize=11.9 K 2024-11-20T17:21:13,867 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/245cadb77dc34dc08bd806d7c359c6e7 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/245cadb77dc34dc08bd806d7c359c6e7 2024-11-20T17:21:13,877 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/245cadb77dc34dc08bd806d7c359c6e7, entries=150, sequenceid=157, filesize=11.9 K 2024-11-20T17:21:13,879 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/3d57f0011fe14077b58502f91aeb5db4 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/3d57f0011fe14077b58502f91aeb5db4 2024-11-20T17:21:13,890 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/3d57f0011fe14077b58502f91aeb5db4, entries=150, sequenceid=157, filesize=11.9 K 2024-11-20T17:21:13,892 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for d436a1ae301ec26cf78d29bd05a18bd2 in 606ms, sequenceid=157, 
compaction requested=true 2024-11-20T17:21:13,892 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:13,892 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:21:13,892 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:21:13,892 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:13,892 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:21:13,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:21:13,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:13,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:21:13,894 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:21:13,896 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48360 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:21:13,897 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): d436a1ae301ec26cf78d29bd05a18bd2/A is initiating minor compaction (all files) 2024-11-20T17:21:13,897 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48360 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:21:13,897 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): d436a1ae301ec26cf78d29bd05a18bd2/B is initiating minor compaction (all files) 2024-11-20T17:21:13,897 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d436a1ae301ec26cf78d29bd05a18bd2/B in TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:13,897 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d436a1ae301ec26cf78d29bd05a18bd2/A in TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 
2024-11-20T17:21:13,897 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/44e69a6c5d834051a45ff6ab36594997, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/3be21fc0281c4d85ae32ca41ae61eb7e, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/c19359efc8ac44d9be70d9e5671ce69c, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/82401d6cfa4b47948d317c76c28a6e71] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp, totalSize=47.2 K 2024-11-20T17:21:13,899 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/5fce7c2a1dcf4289bdfa16066b0b3fb2, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/884d3aeda8c54667955d64526febbafc, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/2f36e6eea3474788a53990a3c718a789, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/245cadb77dc34dc08bd806d7c359c6e7] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp, totalSize=47.2 K 2024-11-20T17:21:13,899 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 5fce7c2a1dcf4289bdfa16066b0b3fb2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732123270735 2024-11-20T17:21:13,900 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 44e69a6c5d834051a45ff6ab36594997, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732123270735 2024-11-20T17:21:13,900 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 884d3aeda8c54667955d64526febbafc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732123271178 2024-11-20T17:21:13,901 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3be21fc0281c4d85ae32ca41ae61eb7e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732123271178 2024-11-20T17:21:13,901 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 2f36e6eea3474788a53990a3c718a789, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1732123271750 2024-11-20T17:21:13,902 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
c19359efc8ac44d9be70d9e5671ce69c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1732123271750 2024-11-20T17:21:13,903 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 245cadb77dc34dc08bd806d7c359c6e7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732123272147 2024-11-20T17:21:13,904 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 82401d6cfa4b47948d317c76c28a6e71, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732123272147 2024-11-20T17:21:13,931 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d436a1ae301ec26cf78d29bd05a18bd2#B#compaction#30 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:13,932 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/07f435337b3b4e3cbdf5030e1a12c437 is 50, key is test_row_0/B:col10/1732123272147/Put/seqid=0 2024-11-20T17:21:13,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on d436a1ae301ec26cf78d29bd05a18bd2 2024-11-20T17:21:13,934 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d436a1ae301ec26cf78d29bd05a18bd2 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T17:21:13,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=A 2024-11-20T17:21:13,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:13,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=B 2024-11-20T17:21:13,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:13,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=C 2024-11-20T17:21:13,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:13,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T17:21:13,943 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d436a1ae301ec26cf78d29bd05a18bd2#A#compaction#31 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:13,944 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/60eb3fff24f945e18bb06242cc949678 is 50, key is test_row_0/A:col10/1732123272147/Put/seqid=0 2024-11-20T17:21:13,952 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:13,953 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T17:21:13,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:13,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. as already flushing 2024-11-20T17:21:13,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:13,953 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:13,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:13,954 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/094edeb7be964ef3a128c78b86a9299d is 50, key is test_row_1/A:col10/1732123273929/Put/seqid=0 2024-11-20T17:21:13,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:13,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741869_1045 (size=12493) 2024-11-20T17:21:13,979 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/07f435337b3b4e3cbdf5030e1a12c437 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/07f435337b3b4e3cbdf5030e1a12c437 2024-11-20T17:21:13,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741870_1046 (size=12493) 2024-11-20T17:21:13,991 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d436a1ae301ec26cf78d29bd05a18bd2/B of d436a1ae301ec26cf78d29bd05a18bd2 into 07f435337b3b4e3cbdf5030e1a12c437(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:21:13,991 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:13,991 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2., storeName=d436a1ae301ec26cf78d29bd05a18bd2/B, priority=12, startTime=1732123273892; duration=0sec 2024-11-20T17:21:13,991 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:21:13,991 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:B 2024-11-20T17:21:13,992 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:21:13,994 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:21:13,995 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): d436a1ae301ec26cf78d29bd05a18bd2/C is initiating minor compaction (all files) 2024-11-20T17:21:13,995 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d436a1ae301ec26cf78d29bd05a18bd2/C in TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 
2024-11-20T17:21:13,995 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/19c4c3b1863141d0bc5507a17dc8ea30, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/6dbac4fac49c47ea8dfffa352c36149c, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/3d57f0011fe14077b58502f91aeb5db4] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp, totalSize=35.5 K 2024-11-20T17:21:13,996 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 19c4c3b1863141d0bc5507a17dc8ea30, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732123271178 2024-11-20T17:21:13,997 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 6dbac4fac49c47ea8dfffa352c36149c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1732123271750 2024-11-20T17:21:13,998 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 3d57f0011fe14077b58502f91aeb5db4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732123272147 2024-11-20T17:21:14,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741871_1047 (size=9757) 2024-11-20T17:21:14,005 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/094edeb7be964ef3a128c78b86a9299d 2024-11-20T17:21:14,022 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:14,022 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:14,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123334016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:14,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123334019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:14,027 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/f1068ead3f874160b685d97d69bb8f95 is 50, key is test_row_1/B:col10/1732123273929/Put/seqid=0 2024-11-20T17:21:14,034 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d436a1ae301ec26cf78d29bd05a18bd2#C#compaction#34 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:14,035 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/59b0d8c85427441c9a6abd22ad1453a2 is 50, key is test_row_0/C:col10/1732123272147/Put/seqid=0 2024-11-20T17:21:14,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741872_1048 (size=9757) 2024-11-20T17:21:14,047 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/f1068ead3f874160b685d97d69bb8f95 2024-11-20T17:21:14,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741873_1049 (size=12493) 2024-11-20T17:21:14,063 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/e2de71420de044feb9dbffe027691976 is 50, key is test_row_1/C:col10/1732123273929/Put/seqid=0 2024-11-20T17:21:14,075 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/59b0d8c85427441c9a6abd22ad1453a2 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/59b0d8c85427441c9a6abd22ad1453a2 2024-11-20T17:21:14,086 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d436a1ae301ec26cf78d29bd05a18bd2/C of d436a1ae301ec26cf78d29bd05a18bd2 into 59b0d8c85427441c9a6abd22ad1453a2(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:21:14,087 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:14,087 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2., storeName=d436a1ae301ec26cf78d29bd05a18bd2/C, priority=13, startTime=1732123273893; duration=0sec 2024-11-20T17:21:14,087 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:14,087 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:C 2024-11-20T17:21:14,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741874_1050 (size=9757) 2024-11-20T17:21:14,100 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/e2de71420de044feb9dbffe027691976 2024-11-20T17:21:14,108 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:14,109 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T17:21:14,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:14,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. as already flushing 2024-11-20T17:21:14,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:14,110 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:14,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:14,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:14,116 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/094edeb7be964ef3a128c78b86a9299d as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/094edeb7be964ef3a128c78b86a9299d 2024-11-20T17:21:14,126 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:14,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123334125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:14,127 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:14,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123334126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:14,132 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/094edeb7be964ef3a128c78b86a9299d, entries=100, sequenceid=168, filesize=9.5 K 2024-11-20T17:21:14,134 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/f1068ead3f874160b685d97d69bb8f95 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/f1068ead3f874160b685d97d69bb8f95 2024-11-20T17:21:14,142 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/f1068ead3f874160b685d97d69bb8f95, entries=100, sequenceid=168, filesize=9.5 K 2024-11-20T17:21:14,144 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/e2de71420de044feb9dbffe027691976 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/e2de71420de044feb9dbffe027691976 2024-11-20T17:21:14,153 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/e2de71420de044feb9dbffe027691976, entries=100, sequenceid=168, filesize=9.5 K 2024-11-20T17:21:14,155 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, 
currentSize=147.60 KB/151140 for d436a1ae301ec26cf78d29bd05a18bd2 in 221ms, sequenceid=168, compaction requested=false 2024-11-20T17:21:14,156 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:14,264 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:14,264 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T17:21:14,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:14,265 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing d436a1ae301ec26cf78d29bd05a18bd2 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T17:21:14,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=A 2024-11-20T17:21:14,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:14,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=B 2024-11-20T17:21:14,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:14,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=C 2024-11-20T17:21:14,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:14,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/8506204ed40840af867b5bfaac9ff469 is 50, key is test_row_0/A:col10/1732123274010/Put/seqid=0 2024-11-20T17:21:14,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741875_1051 (size=12151) 2024-11-20T17:21:14,309 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/8506204ed40840af867b5bfaac9ff469 2024-11-20T17:21:14,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/046a31aaec7246dd8fcffd7c75ea9295 is 50, key is test_row_0/B:col10/1732123274010/Put/seqid=0 2024-11-20T17:21:14,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on d436a1ae301ec26cf78d29bd05a18bd2 2024-11-20T17:21:14,331 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. as already flushing 2024-11-20T17:21:14,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741876_1052 (size=12151) 2024-11-20T17:21:14,339 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/046a31aaec7246dd8fcffd7c75ea9295 2024-11-20T17:21:14,348 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:14,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123334347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:14,349 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:14,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123334348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:14,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/9b9286096653477da8a661a2e3329064 is 50, key is test_row_0/C:col10/1732123274010/Put/seqid=0 2024-11-20T17:21:14,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741877_1053 (size=12151) 2024-11-20T17:21:14,381 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/9b9286096653477da8a661a2e3329064 2024-11-20T17:21:14,403 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/60eb3fff24f945e18bb06242cc949678 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/60eb3fff24f945e18bb06242cc949678 2024-11-20T17:21:14,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/8506204ed40840af867b5bfaac9ff469 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/8506204ed40840af867b5bfaac9ff469 2024-11-20T17:21:14,415 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d436a1ae301ec26cf78d29bd05a18bd2/A of d436a1ae301ec26cf78d29bd05a18bd2 into 60eb3fff24f945e18bb06242cc949678(size=12.2 K), total size for store is 21.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:21:14,415 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:14,415 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2., storeName=d436a1ae301ec26cf78d29bd05a18bd2/A, priority=12, startTime=1732123273892; duration=0sec 2024-11-20T17:21:14,415 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:14,415 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:A 2024-11-20T17:21:14,423 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/8506204ed40840af867b5bfaac9ff469, entries=150, sequenceid=195, filesize=11.9 K 2024-11-20T17:21:14,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/046a31aaec7246dd8fcffd7c75ea9295 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/046a31aaec7246dd8fcffd7c75ea9295 2024-11-20T17:21:14,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T17:21:14,439 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/046a31aaec7246dd8fcffd7c75ea9295, entries=150, sequenceid=195, filesize=11.9 K 2024-11-20T17:21:14,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/9b9286096653477da8a661a2e3329064 as 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/9b9286096653477da8a661a2e3329064 2024-11-20T17:21:14,451 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:14,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123334451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:14,454 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:14,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123334451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:14,457 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/9b9286096653477da8a661a2e3329064, entries=150, sequenceid=195, filesize=11.9 K 2024-11-20T17:21:14,459 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for d436a1ae301ec26cf78d29bd05a18bd2 in 194ms, sequenceid=195, compaction requested=true 2024-11-20T17:21:14,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:14,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 
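The RegionTooBusyException warnings above ("Over memstore limit=512.0 K") are regionserver back-pressure: HRegion.checkResources rejects new mutations once the region's memstore grows past its blocking size (roughly hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier), and puts succeed again once the in-flight flush drains the memstore. The default client retries such failures on its own, and depending on retry settings the exception may surface wrapped in a retries-exhausted error rather than directly; the sketch below simply makes the retry loop explicit for illustration. Table, family, and row naming follow the log; the value written and the backoff policy are assumptions.

```java
// Hedged sketch: an explicit retry loop around Table.put for the failure mode
// logged above. In practice the HBase client's own retry policy usually handles
// RegionTooBusyException; row value and backoff numbers here are assumptions.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoff {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break;                      // write accepted
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) throw e;  // give up after a few attempts
          Thread.sleep(backoffMs);    // wait for the flush to drain the memstore
          backoffMs *= 2;             // simple exponential backoff
        }
      }
    }
  }
}
```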
2024-11-20T17:21:14,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-11-20T17:21:14,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-11-20T17:21:14,465 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-11-20T17:21:14,465 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1280 sec 2024-11-20T17:21:14,470 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 1.1400 sec 2024-11-20T17:21:14,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on d436a1ae301ec26cf78d29bd05a18bd2 2024-11-20T17:21:14,661 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d436a1ae301ec26cf78d29bd05a18bd2 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T17:21:14,661 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=A 2024-11-20T17:21:14,661 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:14,661 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=B 2024-11-20T17:21:14,661 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:14,661 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=C 2024-11-20T17:21:14,662 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:14,670 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/3f66119f3b1d4296bb0580926a6cde9a is 50, key is test_row_0/A:col10/1732123274659/Put/seqid=0 2024-11-20T17:21:14,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741878_1054 (size=12151) 2024-11-20T17:21:14,681 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/3f66119f3b1d4296bb0580926a6cde9a 2024-11-20T17:21:14,696 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/a89459494c554b1fbf7de61aaf28362a is 50, key is test_row_0/B:col10/1732123274659/Put/seqid=0 2024-11-20T17:21:14,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741879_1055 
(size=12151) 2024-11-20T17:21:14,717 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:14,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123334716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:14,718 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:14,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123334716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:14,722 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/a89459494c554b1fbf7de61aaf28362a 2024-11-20T17:21:14,734 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/e3c0182e0a624046bc0dae7796218adf is 50, key is test_row_0/C:col10/1732123274659/Put/seqid=0 2024-11-20T17:21:14,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741880_1056 (size=12151) 2024-11-20T17:21:14,752 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/e3c0182e0a624046bc0dae7796218adf 2024-11-20T17:21:14,763 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/3f66119f3b1d4296bb0580926a6cde9a as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/3f66119f3b1d4296bb0580926a6cde9a 2024-11-20T17:21:14,772 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/3f66119f3b1d4296bb0580926a6cde9a, entries=150, sequenceid=209, filesize=11.9 K 2024-11-20T17:21:14,773 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/a89459494c554b1fbf7de61aaf28362a as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/a89459494c554b1fbf7de61aaf28362a 2024-11-20T17:21:14,784 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/a89459494c554b1fbf7de61aaf28362a, entries=150, sequenceid=209, filesize=11.9 K 2024-11-20T17:21:14,786 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/e3c0182e0a624046bc0dae7796218adf as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/e3c0182e0a624046bc0dae7796218adf 2024-11-20T17:21:14,795 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/e3c0182e0a624046bc0dae7796218adf, entries=150, sequenceid=209, filesize=11.9 K 2024-11-20T17:21:14,796 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for d436a1ae301ec26cf78d29bd05a18bd2 in 136ms, sequenceid=209, compaction requested=true 2024-11-20T17:21:14,796 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:14,796 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:21:14,796 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:21:14,796 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:14,797 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:21:14,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:21:14,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:14,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:21:14,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:21:14,799 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46552 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:21:14,799 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): 
d436a1ae301ec26cf78d29bd05a18bd2/A is initiating minor compaction (all files) 2024-11-20T17:21:14,799 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d436a1ae301ec26cf78d29bd05a18bd2/A in TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:14,799 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/60eb3fff24f945e18bb06242cc949678, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/094edeb7be964ef3a128c78b86a9299d, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/8506204ed40840af867b5bfaac9ff469, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/3f66119f3b1d4296bb0580926a6cde9a] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp, totalSize=45.5 K 2024-11-20T17:21:14,800 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46552 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:21:14,800 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): d436a1ae301ec26cf78d29bd05a18bd2/B is initiating minor compaction (all files) 2024-11-20T17:21:14,800 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d436a1ae301ec26cf78d29bd05a18bd2/B in TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 
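The selection above ("Exploring compaction algorithm has selected 4 files of size 46552 ... 3 permutations with 3 in ratio") comes from ExploringCompactionPolicy: a candidate set is acceptable only if no single file is larger than hbase.hstore.compaction.ratio (documented default 1.2) times the combined size of the other files in the set, while the "16 blocking" figure is the hbase.hstore.blockingStoreFiles threshold (default 16) at which further writes are held back until compaction catches up. The following self-contained sketch applies a simplified version of that ratio test to the four store-file sizes from this log; it is not the actual HBase implementation, and the 1.2 ratio is an assumed default rather than a value read from the test configuration.

```java
// Simplified sketch of the "in ratio" test used when exploring candidate sets.
// The sizes (bytes) are the four A-store files selected above; 1.2 is the
// documented default for hbase.hstore.compaction.ratio, assumed here.
import java.util.List;

public class RatioCheckExample {
  static boolean filesInRatio(List<Long> sizes, double ratio) {
    long total = sizes.stream().mapToLong(Long::longValue).sum();
    for (long size : sizes) {
      // No single file may dwarf the rest of the candidate set.
      if (size > ratio * (total - size)) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // ~12.2 K + 9.5 K + 11.9 K + 11.9 K = 46552 bytes, matching "selected 4 files of size 46552"
    List<Long> sizes = List.of(12_493L, 9_757L, 12_151L, 12_151L);
    System.out.println(filesInRatio(sizes, 1.2)); // prints true: all four files qualify for one compaction
  }
}
```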
2024-11-20T17:21:14,800 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/07f435337b3b4e3cbdf5030e1a12c437, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/f1068ead3f874160b685d97d69bb8f95, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/046a31aaec7246dd8fcffd7c75ea9295, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/a89459494c554b1fbf7de61aaf28362a] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp, totalSize=45.5 K 2024-11-20T17:21:14,800 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 60eb3fff24f945e18bb06242cc949678, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732123272147 2024-11-20T17:21:14,801 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 07f435337b3b4e3cbdf5030e1a12c437, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732123272147 2024-11-20T17:21:14,801 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 094edeb7be964ef3a128c78b86a9299d, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1732123273303 2024-11-20T17:21:14,801 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting f1068ead3f874160b685d97d69bb8f95, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1732123273303 2024-11-20T17:21:14,802 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8506204ed40840af867b5bfaac9ff469, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732123274007 2024-11-20T17:21:14,803 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3f66119f3b1d4296bb0580926a6cde9a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732123274345 2024-11-20T17:21:14,803 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 046a31aaec7246dd8fcffd7c75ea9295, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732123274007 2024-11-20T17:21:14,803 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting a89459494c554b1fbf7de61aaf28362a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732123274345 2024-11-20T17:21:14,825 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d436a1ae301ec26cf78d29bd05a18bd2#B#compaction#42 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:14,826 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/f18a3fe3c4914673b5309a7a25e95aed is 50, key is test_row_0/B:col10/1732123274659/Put/seqid=0 2024-11-20T17:21:14,826 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d436a1ae301ec26cf78d29bd05a18bd2#A#compaction#43 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:14,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on d436a1ae301ec26cf78d29bd05a18bd2 2024-11-20T17:21:14,827 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/cd499cf7922d4b828941dedf78adad9a is 50, key is test_row_0/A:col10/1732123274659/Put/seqid=0 2024-11-20T17:21:14,827 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d436a1ae301ec26cf78d29bd05a18bd2 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T17:21:14,827 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=A 2024-11-20T17:21:14,828 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:14,828 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=B 2024-11-20T17:21:14,828 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:14,828 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=C 2024-11-20T17:21:14,828 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:14,835 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/5432d4ce26a14b92ae3b5875b3e7061c is 50, key is test_row_0/A:col10/1732123274822/Put/seqid=0 2024-11-20T17:21:14,851 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:14,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123334850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:14,852 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:14,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123334850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:14,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741881_1057 (size=12629) 2024-11-20T17:21:14,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741882_1058 (size=12151) 2024-11-20T17:21:14,895 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/5432d4ce26a14b92ae3b5875b3e7061c 2024-11-20T17:21:14,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741883_1059 (size=12629) 2024-11-20T17:21:14,909 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/a3a95f17fe3c44cdb9e94a625a64ae91 is 50, key is test_row_0/B:col10/1732123274822/Put/seqid=0 2024-11-20T17:21:14,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741884_1060 (size=12151) 2024-11-20T17:21:14,947 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/a3a95f17fe3c44cdb9e94a625a64ae91 2024-11-20T17:21:14,958 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:14,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123334956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:14,959 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:14,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123334955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:14,961 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/ab2b704c00284dc9a1df886da43deb1a is 50, key is test_row_0/C:col10/1732123274822/Put/seqid=0 2024-11-20T17:21:14,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741885_1061 (size=12151) 2024-11-20T17:21:14,980 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/ab2b704c00284dc9a1df886da43deb1a 2024-11-20T17:21:14,993 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/5432d4ce26a14b92ae3b5875b3e7061c as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/5432d4ce26a14b92ae3b5875b3e7061c 2024-11-20T17:21:15,006 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/5432d4ce26a14b92ae3b5875b3e7061c, entries=150, sequenceid=234, filesize=11.9 K 2024-11-20T17:21:15,009 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/a3a95f17fe3c44cdb9e94a625a64ae91 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/a3a95f17fe3c44cdb9e94a625a64ae91 2024-11-20T17:21:15,016 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/a3a95f17fe3c44cdb9e94a625a64ae91, entries=150, sequenceid=234, filesize=11.9 K 2024-11-20T17:21:15,017 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/ab2b704c00284dc9a1df886da43deb1a as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/ab2b704c00284dc9a1df886da43deb1a 2024-11-20T17:21:15,032 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/ab2b704c00284dc9a1df886da43deb1a, entries=150, sequenceid=234, filesize=11.9 K 2024-11-20T17:21:15,034 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for d436a1ae301ec26cf78d29bd05a18bd2 in 207ms, sequenceid=234, compaction requested=true 2024-11-20T17:21:15,034 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:15,035 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:A, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:21:15,035 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T17:21:15,035 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:B, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:21:15,035 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-20T17:21:15,035 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:21:15,035 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-11-20T17:21:15,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on d436a1ae301ec26cf78d29bd05a18bd2 2024-11-20T17:21:15,165 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d436a1ae301ec26cf78d29bd05a18bd2 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T17:21:15,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=A 2024-11-20T17:21:15,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:15,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=B 2024-11-20T17:21:15,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:15,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=C 2024-11-20T17:21:15,166 DEBUG [MemStoreFlusher.0 
{}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:15,174 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/007108958e064f0087ed6dad51783e63 is 50, key is test_row_0/A:col10/1732123275163/Put/seqid=0 2024-11-20T17:21:15,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741886_1062 (size=12151) 2024-11-20T17:21:15,213 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:15,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123335211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:15,215 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:15,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123335214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:15,285 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/f18a3fe3c4914673b5309a7a25e95aed as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/f18a3fe3c4914673b5309a7a25e95aed 2024-11-20T17:21:15,295 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d436a1ae301ec26cf78d29bd05a18bd2/B of d436a1ae301ec26cf78d29bd05a18bd2 into f18a3fe3c4914673b5309a7a25e95aed(size=12.3 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:21:15,295 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:15,295 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2., storeName=d436a1ae301ec26cf78d29bd05a18bd2/B, priority=12, startTime=1732123274797; duration=0sec 2024-11-20T17:21:15,296 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-11-20T17:21:15,296 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:B 2024-11-20T17:21:15,297 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 4 compacting, 1 eligible, 16 blocking 2024-11-20T17:21:15,297 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-20T17:21:15,297 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 
2024-11-20T17:21:15,298 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. because compaction request was cancelled 2024-11-20T17:21:15,298 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:A 2024-11-20T17:21:15,298 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-20T17:21:15,301 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 58703 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-20T17:21:15,301 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): d436a1ae301ec26cf78d29bd05a18bd2/C is initiating minor compaction (all files) 2024-11-20T17:21:15,301 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d436a1ae301ec26cf78d29bd05a18bd2/C in TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:15,301 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/59b0d8c85427441c9a6abd22ad1453a2, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/e2de71420de044feb9dbffe027691976, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/9b9286096653477da8a661a2e3329064, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/e3c0182e0a624046bc0dae7796218adf, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/ab2b704c00284dc9a1df886da43deb1a] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp, totalSize=57.3 K 2024-11-20T17:21:15,303 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 59b0d8c85427441c9a6abd22ad1453a2, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732123272147 2024-11-20T17:21:15,303 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting e2de71420de044feb9dbffe027691976, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1732123273303 2024-11-20T17:21:15,304 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 9b9286096653477da8a661a2e3329064, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732123274007 2024-11-20T17:21:15,305 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting e3c0182e0a624046bc0dae7796218adf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, 
seqNum=209, earliestPutTs=1732123274345 2024-11-20T17:21:15,306 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting ab2b704c00284dc9a1df886da43deb1a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1732123274709 2024-11-20T17:21:15,317 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/cd499cf7922d4b828941dedf78adad9a as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/cd499cf7922d4b828941dedf78adad9a 2024-11-20T17:21:15,317 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:15,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123335316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:15,321 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:15,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123335320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:15,328 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d436a1ae301ec26cf78d29bd05a18bd2/A of d436a1ae301ec26cf78d29bd05a18bd2 into cd499cf7922d4b828941dedf78adad9a(size=12.3 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:21:15,328 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:15,328 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2., storeName=d436a1ae301ec26cf78d29bd05a18bd2/A, priority=12, startTime=1732123274796; duration=0sec 2024-11-20T17:21:15,329 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T17:21:15,329 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:A 2024-11-20T17:21:15,329 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:B 2024-11-20T17:21:15,329 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 5 compacting, 0 eligible, 16 blocking 2024-11-20T17:21:15,329 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-20T17:21:15,329 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-20T17:21:15,329 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 
because compaction request was cancelled 2024-11-20T17:21:15,329 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:C 2024-11-20T17:21:15,329 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-20T17:21:15,330 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d436a1ae301ec26cf78d29bd05a18bd2#C#compaction#48 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:15,330 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-20T17:21:15,330 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-20T17:21:15,330 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. because compaction request was cancelled 2024-11-20T17:21:15,330 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:B 2024-11-20T17:21:15,331 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/d071b6e7ce9f4157a63280535aa1aed9 is 50, key is test_row_0/C:col10/1732123274822/Put/seqid=0 2024-11-20T17:21:15,336 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:15,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47536 deadline: 1732123335335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:15,338 DEBUG [Thread-149 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4147 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2., hostname=d514dc944523,40121,1732123262111, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:21:15,347 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:15,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47542 deadline: 1732123335345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:15,348 DEBUG [Thread-155 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4155 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2., hostname=d514dc944523,40121,1732123262111, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:21:15,351 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:15,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47566 deadline: 1732123335350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:15,352 DEBUG [Thread-153 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4158 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2., hostname=d514dc944523,40121,1732123262111, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:21:15,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741887_1063 (size=12663) 2024-11-20T17:21:15,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T17:21:15,439 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-11-20T17:21:15,441 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:21:15,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-11-20T17:21:15,444 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:21:15,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T17:21:15,450 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:21:15,450 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:21:15,523 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:15,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123335521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:15,524 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:15,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123335522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:15,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T17:21:15,585 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/007108958e064f0087ed6dad51783e63 2024-11-20T17:21:15,598 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/d32617edf68245b5b218c4ebe446709d is 50, key is test_row_0/B:col10/1732123275163/Put/seqid=0 2024-11-20T17:21:15,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741888_1064 (size=12151) 2024-11-20T17:21:15,604 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:15,605 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-20T17:21:15,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:15,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. as already flushing 2024-11-20T17:21:15,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 
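The RegionTooBusyException entries above ("Over memstore limit=512.0 K") mean the region is rejecting new writes because its memstore has grown past the blocking limit, and the client's RpcRetryingCallerImpl backs off and retries (tries=6, retries=16 earlier in this section). The 512 K figure is consistent with a deliberately small flush size multiplied by the stock block multiplier of 4; the minimal sketch below shows how such a limit could be configured, assuming the standard HBase keys (the test's actual configuration is not shown in this log).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallMemstoreBlockingLimit {
        public static Configuration create() {
            Configuration conf = HBaseConfiguration.create();
            // Hypothetical test setting: flush each memstore at 128 KB instead of the 128 MB default.
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
            // Stock multiplier: block writes once the memstore exceeds 4x the flush size,
            // i.e. the 512 K limit reported by HRegion.checkResources above.
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            return conf;
        }
    }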
2024-11-20T17:21:15,605 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:15,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:15,607 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/d32617edf68245b5b218c4ebe446709d 2024-11-20T17:21:15,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] 
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
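The ERROR and the RemoteProcedureException above are the expected outcome of requesting a flush while one is already running: FlushRegionCallable sees "NOT flushing ... as already flushing", fails pid=19 with "Unable to complete flush", and the master re-dispatches the procedure (it is sent again below and completes successfully). The flush request itself comes from the test driver through the Admin API; a minimal, self-contained sketch of that call follows (class name and setup are illustrative, not taken from the test source).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTestAcidGuarantees {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Triggers a FlushTableProcedure on the master (pid=16/18/20 in this log) and
                // returns once the procedure and its FlushRegionProcedure subprocedure finish.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }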
2024-11-20T17:21:15,621 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/988d60d2c45c455dba9bcab0559c2fec is 50, key is test_row_0/C:col10/1732123275163/Put/seqid=0 2024-11-20T17:21:15,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741889_1065 (size=12151) 2024-11-20T17:21:15,631 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/988d60d2c45c455dba9bcab0559c2fec 2024-11-20T17:21:15,641 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/007108958e064f0087ed6dad51783e63 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/007108958e064f0087ed6dad51783e63 2024-11-20T17:21:15,652 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/007108958e064f0087ed6dad51783e63, entries=150, sequenceid=247, filesize=11.9 K 2024-11-20T17:21:15,654 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/d32617edf68245b5b218c4ebe446709d as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/d32617edf68245b5b218c4ebe446709d 2024-11-20T17:21:15,662 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/d32617edf68245b5b218c4ebe446709d, entries=150, sequenceid=247, filesize=11.9 K 2024-11-20T17:21:15,663 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/988d60d2c45c455dba9bcab0559c2fec as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/988d60d2c45c455dba9bcab0559c2fec 2024-11-20T17:21:15,670 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/988d60d2c45c455dba9bcab0559c2fec, entries=150, sequenceid=247, filesize=11.9 K 2024-11-20T17:21:15,672 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for d436a1ae301ec26cf78d29bd05a18bd2 in 506ms, sequenceid=247, 
compaction requested=true 2024-11-20T17:21:15,672 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:15,672 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:21:15,672 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:21:15,672 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:21:15,672 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T17:21:15,672 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:21:15,672 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-20T17:21:15,673 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 5 compacting, 1 eligible, 16 blocking 2024-11-20T17:21:15,673 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-20T17:21:15,673 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-20T17:21:15,674 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. because compaction request was cancelled 2024-11-20T17:21:15,674 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:C 2024-11-20T17:21:15,674 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:21:15,675 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:21:15,676 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): d436a1ae301ec26cf78d29bd05a18bd2/A is initiating minor compaction (all files) 2024-11-20T17:21:15,676 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d436a1ae301ec26cf78d29bd05a18bd2/A in TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 
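The compaction bookkeeping above follows the stock selection rules: the first request finds 0 eligible files (5 of the 6 are already compacting) and is cancelled because at least 3 files are needed, while the next pass finds 3 eligible files totalling 36931 bytes and starts a minor compaction of store A. The "Need 3 to initiate" and "16 blocking" figures correspond to the usual store-level settings; the sketch below shows those keys with their stock values, for illustration only (the test's own configuration is not visible here).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionSelectionDefaults {
        public static Configuration create() {
            Configuration conf = HBaseConfiguration.create();
            // Minimum number of eligible store files before a minor compaction starts
            // ("Need 3 to initiate" above).
            conf.setInt("hbase.hstore.compaction.min", 3);
            // Maximum number of files merged in one minor compaction.
            conf.setInt("hbase.hstore.compaction.max", 10);
            // Writes to the store are delayed once it holds this many files ("16 blocking" above).
            conf.setInt("hbase.hstore.blockingStoreFiles", 16);
            return conf;
        }
    }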
2024-11-20T17:21:15,676 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/cd499cf7922d4b828941dedf78adad9a, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/5432d4ce26a14b92ae3b5875b3e7061c, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/007108958e064f0087ed6dad51783e63] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp, totalSize=36.1 K 2024-11-20T17:21:15,677 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting cd499cf7922d4b828941dedf78adad9a, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732123274345 2024-11-20T17:21:15,677 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5432d4ce26a14b92ae3b5875b3e7061c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1732123274709 2024-11-20T17:21:15,678 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 007108958e064f0087ed6dad51783e63, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732123274840 2024-11-20T17:21:15,691 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d436a1ae301ec26cf78d29bd05a18bd2#A#compaction#51 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:15,692 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/3319e269e5624cef8f6035d1b144021d is 50, key is test_row_0/A:col10/1732123275163/Put/seqid=0 2024-11-20T17:21:15,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741890_1066 (size=12731) 2024-11-20T17:21:15,721 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/3319e269e5624cef8f6035d1b144021d as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/3319e269e5624cef8f6035d1b144021d 2024-11-20T17:21:15,728 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d436a1ae301ec26cf78d29bd05a18bd2/A of d436a1ae301ec26cf78d29bd05a18bd2 into 3319e269e5624cef8f6035d1b144021d(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:21:15,729 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:15,729 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2., storeName=d436a1ae301ec26cf78d29bd05a18bd2/A, priority=13, startTime=1732123275672; duration=0sec 2024-11-20T17:21:15,729 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:21:15,729 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:A 2024-11-20T17:21:15,729 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:21:15,731 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:21:15,731 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): d436a1ae301ec26cf78d29bd05a18bd2/B is initiating minor compaction (all files) 2024-11-20T17:21:15,731 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d436a1ae301ec26cf78d29bd05a18bd2/B in TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:15,731 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/f18a3fe3c4914673b5309a7a25e95aed, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/a3a95f17fe3c44cdb9e94a625a64ae91, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/d32617edf68245b5b218c4ebe446709d] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp, totalSize=36.1 K 2024-11-20T17:21:15,732 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting f18a3fe3c4914673b5309a7a25e95aed, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732123274345 2024-11-20T17:21:15,732 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting a3a95f17fe3c44cdb9e94a625a64ae91, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1732123274709 2024-11-20T17:21:15,732 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting d32617edf68245b5b218c4ebe446709d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732123274840 2024-11-20T17:21:15,742 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): d436a1ae301ec26cf78d29bd05a18bd2#B#compaction#52 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:15,743 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/1d268ddfc69f47a1a48e226cc09e5076 is 50, key is test_row_0/B:col10/1732123275163/Put/seqid=0 2024-11-20T17:21:15,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T17:21:15,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741891_1067 (size=12731) 2024-11-20T17:21:15,759 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:15,760 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-20T17:21:15,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:15,760 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing d436a1ae301ec26cf78d29bd05a18bd2 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T17:21:15,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=A 2024-11-20T17:21:15,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:15,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=B 2024-11-20T17:21:15,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:15,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=C 2024-11-20T17:21:15,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:15,764 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/d071b6e7ce9f4157a63280535aa1aed9 as 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/d071b6e7ce9f4157a63280535aa1aed9 2024-11-20T17:21:15,776 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in d436a1ae301ec26cf78d29bd05a18bd2/C of d436a1ae301ec26cf78d29bd05a18bd2 into d071b6e7ce9f4157a63280535aa1aed9(size=12.4 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:21:15,776 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:15,776 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2., storeName=d436a1ae301ec26cf78d29bd05a18bd2/C, priority=11, startTime=1732123275035; duration=0sec 2024-11-20T17:21:15,778 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:15,778 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:C 2024-11-20T17:21:15,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/fbc86b289f3145419972b888b135353b is 50, key is test_row_0/A:col10/1732123275196/Put/seqid=0 2024-11-20T17:21:15,782 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/1d268ddfc69f47a1a48e226cc09e5076 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/1d268ddfc69f47a1a48e226cc09e5076 2024-11-20T17:21:15,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741892_1068 (size=12301) 2024-11-20T17:21:15,791 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d436a1ae301ec26cf78d29bd05a18bd2/B of d436a1ae301ec26cf78d29bd05a18bd2 into 1d268ddfc69f47a1a48e226cc09e5076(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
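The PressureAwareThroughputController lines above show compaction I/O being rate-limited: this run averaged 6.55 MB/second with no sleeping because the effective limit at that moment was 50.00 MB/second. The bound is adjustable through the compaction throughput settings; below is a hedged sketch with placeholder values (the shipped defaults differ between HBase versions, so treat the numbers as illustrative).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputBounds {
        public static Configuration create() {
            Configuration conf = HBaseConfiguration.create();
            // Placeholder values, in bytes per second, for the pressure-aware controller's
            // lower and higher bounds. The log's "total limit is 50.00 MB/second" is the
            // controller's effective bound at that moment, not necessarily these numbers.
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
            return conf;
        }
    }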
2024-11-20T17:21:15,791 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:15,791 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2., storeName=d436a1ae301ec26cf78d29bd05a18bd2/B, priority=13, startTime=1732123275672; duration=0sec 2024-11-20T17:21:15,792 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:15,792 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:B 2024-11-20T17:21:15,792 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=273 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/fbc86b289f3145419972b888b135353b 2024-11-20T17:21:15,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/9e40c9fea11f4b3187dc71a86995127e is 50, key is test_row_0/B:col10/1732123275196/Put/seqid=0 2024-11-20T17:21:15,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on d436a1ae301ec26cf78d29bd05a18bd2 2024-11-20T17:21:15,828 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. as already flushing 2024-11-20T17:21:15,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741893_1069 (size=12301) 2024-11-20T17:21:15,844 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=273 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/9e40c9fea11f4b3187dc71a86995127e 2024-11-20T17:21:15,845 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:15,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123335844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:15,846 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:15,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123335846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:15,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/81189d68417b48098491148bc27eb54e is 50, key is test_row_0/C:col10/1732123275196/Put/seqid=0 2024-11-20T17:21:15,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741894_1070 (size=12301) 2024-11-20T17:21:15,885 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=273 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/81189d68417b48098491148bc27eb54e 2024-11-20T17:21:15,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/fbc86b289f3145419972b888b135353b as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/fbc86b289f3145419972b888b135353b 2024-11-20T17:21:15,905 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/fbc86b289f3145419972b888b135353b, entries=150, sequenceid=273, filesize=12.0 K 2024-11-20T17:21:15,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/9e40c9fea11f4b3187dc71a86995127e as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/9e40c9fea11f4b3187dc71a86995127e 2024-11-20T17:21:15,915 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 
{event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/9e40c9fea11f4b3187dc71a86995127e, entries=150, sequenceid=273, filesize=12.0 K 2024-11-20T17:21:15,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/81189d68417b48098491148bc27eb54e as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/81189d68417b48098491148bc27eb54e 2024-11-20T17:21:15,922 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/81189d68417b48098491148bc27eb54e, entries=150, sequenceid=273, filesize=12.0 K 2024-11-20T17:21:15,924 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for d436a1ae301ec26cf78d29bd05a18bd2 in 164ms, sequenceid=273, compaction requested=true 2024-11-20T17:21:15,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:15,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 
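The flush that completes above writes matching files to A, B and C (entries=150, sequenceid=273 in each store) because the test's writer, visible in the AcidGuaranteesTestTool$AtomicityWriter frames earlier in this section, mutates all three families of one row in a single Put, which HBase applies atomically per row. A minimal sketch of such a write follows, with the row key, families and qualifier taken from the log and the value purely illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MultiFamilyWrite {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                byte[] value = Bytes.toBytes("v");  // illustrative payload
                Put put = new Put(Bytes.toBytes("test_row_0"));
                // One Put touching families A, B and C is applied atomically for the row,
                // so each flush above produces the same cells in all three stores.
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
                put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), value);
                put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), value);
                table.put(put);
            }
        }
    }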
2024-11-20T17:21:15,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-11-20T17:21:15,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-11-20T17:21:15,930 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-11-20T17:21:15,930 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 476 msec 2024-11-20T17:21:15,932 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 490 msec 2024-11-20T17:21:15,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on d436a1ae301ec26cf78d29bd05a18bd2 2024-11-20T17:21:15,950 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d436a1ae301ec26cf78d29bd05a18bd2 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T17:21:15,951 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=A 2024-11-20T17:21:15,951 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:15,951 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=B 2024-11-20T17:21:15,951 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:15,951 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=C 2024-11-20T17:21:15,951 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:15,958 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/29022e865fe044c0b5adc47050721f51 is 50, key is test_row_0/A:col10/1732123275838/Put/seqid=0 2024-11-20T17:21:15,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741895_1071 (size=14741) 2024-11-20T17:21:15,967 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/29022e865fe044c0b5adc47050721f51 2024-11-20T17:21:15,982 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/09c7ace4dacc48cf87ce1bb86dce3dc9 is 50, key is test_row_0/B:col10/1732123275838/Put/seqid=0 2024-11-20T17:21:15,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741896_1072 
(size=12301) 2024-11-20T17:21:15,990 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/09c7ace4dacc48cf87ce1bb86dce3dc9 2024-11-20T17:21:16,002 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/d861605c95624608bb296ce3a85d411c is 50, key is test_row_0/C:col10/1732123275838/Put/seqid=0 2024-11-20T17:21:16,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741897_1073 (size=12301) 2024-11-20T17:21:16,042 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:16,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123336041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:16,044 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:16,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123336042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:16,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T17:21:16,048 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-11-20T17:21:16,050 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:21:16,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-11-20T17:21:16,053 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:21:16,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T17:21:16,054 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:21:16,054 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:21:16,146 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:16,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123336144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:16,149 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:16,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123336148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:16,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T17:21:16,206 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:16,207 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T17:21:16,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:16,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. as already flushing 2024-11-20T17:21:16,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:16,207 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:21:16,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:16,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:16,351 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:16,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123336349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:16,352 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:16,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123336351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:16,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T17:21:16,360 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:16,361 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T17:21:16,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:16,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. as already flushing 2024-11-20T17:21:16,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:16,362 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
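The repeated RegionTooBusyException entries above come from HRegion.checkResources rejecting writes once the region's memstore passes its blocking threshold (512 K in this run). Below is a minimal client-side sketch of backing off and retrying a put on that exception; the table name, column family, value, and retry limits are illustrative assumptions, and in practice the HBase client's own retry machinery normally absorbs these rejections before they reach application code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPutExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                long backoffMs = 100;                  // assumed starting backoff
                for (int attempt = 0; attempt < 5; attempt++) {
                    try {
                        table.put(put);                // may be rejected while the memstore is over its blocking limit
                        return;
                    } catch (RegionTooBusyException e) {
                        Thread.sleep(backoffMs);       // give the flusher time to drain the memstore
                        backoffMs *= 2;                // exponential backoff between attempts
                    }
                }
            }
        }
    }
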
2024-11-20T17:21:16,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:16,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:16,414 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/d861605c95624608bb296ce3a85d411c 2024-11-20T17:21:16,421 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/29022e865fe044c0b5adc47050721f51 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/29022e865fe044c0b5adc47050721f51 2024-11-20T17:21:16,428 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/29022e865fe044c0b5adc47050721f51, entries=200, sequenceid=289, filesize=14.4 K 2024-11-20T17:21:16,429 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/09c7ace4dacc48cf87ce1bb86dce3dc9 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/09c7ace4dacc48cf87ce1bb86dce3dc9 2024-11-20T17:21:16,436 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/09c7ace4dacc48cf87ce1bb86dce3dc9, entries=150, sequenceid=289, filesize=12.0 K 2024-11-20T17:21:16,437 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/d861605c95624608bb296ce3a85d411c as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/d861605c95624608bb296ce3a85d411c 2024-11-20T17:21:16,444 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/d861605c95624608bb296ce3a85d411c, entries=150, sequenceid=289, filesize=12.0 K 2024-11-20T17:21:16,445 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for d436a1ae301ec26cf78d29bd05a18bd2 in 495ms, sequenceid=289, compaction requested=true 2024-11-20T17:21:16,446 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:16,446 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
d436a1ae301ec26cf78d29bd05a18bd2:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:21:16,446 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:16,446 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:21:16,446 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:21:16,446 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:21:16,447 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:16,447 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:21:16,447 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:21:16,448 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39773 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:21:16,448 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): d436a1ae301ec26cf78d29bd05a18bd2/A is initiating minor compaction (all files) 2024-11-20T17:21:16,448 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d436a1ae301ec26cf78d29bd05a18bd2/A in TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 
2024-11-20T17:21:16,448 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/3319e269e5624cef8f6035d1b144021d, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/fbc86b289f3145419972b888b135353b, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/29022e865fe044c0b5adc47050721f51] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp, totalSize=38.8 K 2024-11-20T17:21:16,447 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:21:16,448 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): d436a1ae301ec26cf78d29bd05a18bd2/B is initiating minor compaction (all files) 2024-11-20T17:21:16,449 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d436a1ae301ec26cf78d29bd05a18bd2/B in TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:16,449 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/1d268ddfc69f47a1a48e226cc09e5076, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/9e40c9fea11f4b3187dc71a86995127e, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/09c7ace4dacc48cf87ce1bb86dce3dc9] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp, totalSize=36.5 K 2024-11-20T17:21:16,449 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3319e269e5624cef8f6035d1b144021d, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732123274840 2024-11-20T17:21:16,449 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting fbc86b289f3145419972b888b135353b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=273, earliestPutTs=1732123275196 2024-11-20T17:21:16,449 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 1d268ddfc69f47a1a48e226cc09e5076, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732123274840 2024-11-20T17:21:16,450 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 29022e865fe044c0b5adc47050721f51, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1732123275838 2024-11-20T17:21:16,450 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 9e40c9fea11f4b3187dc71a86995127e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=273, earliestPutTs=1732123275196 2024-11-20T17:21:16,451 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 09c7ace4dacc48cf87ce1bb86dce3dc9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1732123275838 2024-11-20T17:21:16,462 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d436a1ae301ec26cf78d29bd05a18bd2#B#compaction#59 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:16,463 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/43c2d327c83b4c2c8f8dfb57a4d254d2 is 50, key is test_row_0/B:col10/1732123275838/Put/seqid=0 2024-11-20T17:21:16,466 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d436a1ae301ec26cf78d29bd05a18bd2#A#compaction#60 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:16,467 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/83b55334c3654a02a04cfa676662e5a3 is 50, key is test_row_0/A:col10/1732123275838/Put/seqid=0 2024-11-20T17:21:16,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741898_1074 (size=12983) 2024-11-20T17:21:16,483 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/43c2d327c83b4c2c8f8dfb57a4d254d2 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/43c2d327c83b4c2c8f8dfb57a4d254d2 2024-11-20T17:21:16,494 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d436a1ae301ec26cf78d29bd05a18bd2/B of d436a1ae301ec26cf78d29bd05a18bd2 into 43c2d327c83b4c2c8f8dfb57a4d254d2(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:21:16,494 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:16,494 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2., storeName=d436a1ae301ec26cf78d29bd05a18bd2/B, priority=13, startTime=1732123276446; duration=0sec 2024-11-20T17:21:16,495 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:21:16,495 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:B 2024-11-20T17:21:16,495 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:21:16,498 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49416 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:21:16,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741899_1075 (size=12983) 2024-11-20T17:21:16,499 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): d436a1ae301ec26cf78d29bd05a18bd2/C is initiating minor compaction (all files) 2024-11-20T17:21:16,499 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d436a1ae301ec26cf78d29bd05a18bd2/C in TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 
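The ExploringCompactionPolicy lines above select three or four eligible HFiles for a minor compaction once their sizes satisfy the compaction ratio. The following stand-alone sketch shows a simplified version of that ratio test (each selected file must be no larger than the combined size of the others times the ratio, with a minimum file count); the 1.2 ratio and minimum of 3 files mirror the usual hbase.hstore.compaction.ratio and hbase.hstore.compaction.min defaults and are assumptions here, not values read from this run, and the real policy explores many candidate permutations.

    import java.util.List;

    // Simplified stand-in for the ratio check used when selecting store files to compact.
    public class RatioSelection {
        static boolean withinRatio(List<Long> sizes, double ratio, int minFiles) {
            if (sizes.size() < minFiles) {
                return false;                   // not enough files to bother compacting
            }
            long total = sizes.stream().mapToLong(Long::longValue).sum();
            for (long size : sizes) {
                // A file qualifies only if it is not much larger than everything else combined.
                if (size > (total - size) * ratio) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Sizes roughly matching the three A-store files selected above (12.4 K, 12.0 K, 14.4 K).
            List<Long> sizes = List.of(12_699L, 12_301L, 14_773L);
            System.out.println(withinRatio(sizes, 1.2, 3));   // prints true: all three files get compacted together
        }
    }
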
2024-11-20T17:21:16,499 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/d071b6e7ce9f4157a63280535aa1aed9, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/988d60d2c45c455dba9bcab0559c2fec, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/81189d68417b48098491148bc27eb54e, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/d861605c95624608bb296ce3a85d411c] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp, totalSize=48.3 K 2024-11-20T17:21:16,499 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting d071b6e7ce9f4157a63280535aa1aed9, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1732123274709 2024-11-20T17:21:16,500 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 988d60d2c45c455dba9bcab0559c2fec, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732123274840 2024-11-20T17:21:16,501 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 81189d68417b48098491148bc27eb54e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=273, earliestPutTs=1732123275196 2024-11-20T17:21:16,501 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting d861605c95624608bb296ce3a85d411c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1732123275838 2024-11-20T17:21:16,514 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:16,515 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T17:21:16,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:16,515 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d436a1ae301ec26cf78d29bd05a18bd2#C#compaction#61 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:16,515 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing d436a1ae301ec26cf78d29bd05a18bd2 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T17:21:16,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=A 2024-11-20T17:21:16,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:16,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=B 2024-11-20T17:21:16,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:16,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=C 2024-11-20T17:21:16,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:16,516 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/29d0b5c73719449284ac177fbac5dde1 is 50, key is test_row_0/C:col10/1732123275838/Put/seqid=0 2024-11-20T17:21:16,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/742de9f588ef4ec595f02fa68b809209 is 50, key is test_row_0/A:col10/1732123276040/Put/seqid=0 2024-11-20T17:21:16,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741900_1076 (size=12949) 2024-11-20T17:21:16,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741901_1077 (size=12301) 2024-11-20T17:21:16,553 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/742de9f588ef4ec595f02fa68b809209 2024-11-20T17:21:16,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/f17806586d264c3fb82f1948aa37a29a is 50, key is 
test_row_0/B:col10/1732123276040/Put/seqid=0 2024-11-20T17:21:16,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741902_1078 (size=12301) 2024-11-20T17:21:16,597 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/f17806586d264c3fb82f1948aa37a29a 2024-11-20T17:21:16,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/b8a7c0a49b434533b3f6358b66193811 is 50, key is test_row_0/C:col10/1732123276040/Put/seqid=0 2024-11-20T17:21:16,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741903_1079 (size=12301) 2024-11-20T17:21:16,635 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/b8a7c0a49b434533b3f6358b66193811 2024-11-20T17:21:16,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/742de9f588ef4ec595f02fa68b809209 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/742de9f588ef4ec595f02fa68b809209 2024-11-20T17:21:16,655 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/742de9f588ef4ec595f02fa68b809209, entries=150, sequenceid=312, filesize=12.0 K 2024-11-20T17:21:16,656 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. as already flushing 2024-11-20T17:21:16,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T17:21:16,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on d436a1ae301ec26cf78d29bd05a18bd2 2024-11-20T17:21:16,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.StoreScanner(992): StoreScanner already has the close lock. 
There is no need to updateReaders 2024-11-20T17:21:16,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/f17806586d264c3fb82f1948aa37a29a as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/f17806586d264c3fb82f1948aa37a29a 2024-11-20T17:21:16,669 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/f17806586d264c3fb82f1948aa37a29a, entries=150, sequenceid=312, filesize=12.0 K 2024-11-20T17:21:16,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/b8a7c0a49b434533b3f6358b66193811 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/b8a7c0a49b434533b3f6358b66193811 2024-11-20T17:21:16,692 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/b8a7c0a49b434533b3f6358b66193811, entries=150, sequenceid=312, filesize=12.0 K 2024-11-20T17:21:16,695 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=60.38 KB/61830 for d436a1ae301ec26cf78d29bd05a18bd2 in 180ms, sequenceid=312, compaction requested=false 2024-11-20T17:21:16,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:16,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 
2024-11-20T17:21:16,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-11-20T17:21:16,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-11-20T17:21:16,699 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-11-20T17:21:16,699 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 643 msec 2024-11-20T17:21:16,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on d436a1ae301ec26cf78d29bd05a18bd2 2024-11-20T17:21:16,700 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d436a1ae301ec26cf78d29bd05a18bd2 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T17:21:16,700 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=A 2024-11-20T17:21:16,701 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:16,701 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=B 2024-11-20T17:21:16,701 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:16,701 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=C 2024-11-20T17:21:16,701 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:16,704 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 651 msec 2024-11-20T17:21:16,709 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/e2d52ea2226141e59c9bd3e0432357bb is 50, key is test_row_0/A:col10/1732123276698/Put/seqid=0 2024-11-20T17:21:16,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741904_1080 (size=22065) 2024-11-20T17:21:16,730 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/e2d52ea2226141e59c9bd3e0432357bb 2024-11-20T17:21:16,746 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/205284ea80a0416baeb7da2ad8512d85 is 50, key is test_row_0/B:col10/1732123276698/Put/seqid=0 2024-11-20T17:21:16,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741905_1081 
(size=12301) 2024-11-20T17:21:16,757 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/205284ea80a0416baeb7da2ad8512d85 2024-11-20T17:21:16,768 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/3a759007ac604b19b51609fd62d238cd is 50, key is test_row_0/C:col10/1732123276698/Put/seqid=0 2024-11-20T17:21:16,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741906_1082 (size=12301) 2024-11-20T17:21:16,779 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/3a759007ac604b19b51609fd62d238cd 2024-11-20T17:21:16,786 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/e2d52ea2226141e59c9bd3e0432357bb as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/e2d52ea2226141e59c9bd3e0432357bb 2024-11-20T17:21:16,797 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/e2d52ea2226141e59c9bd3e0432357bb, entries=350, sequenceid=325, filesize=21.5 K 2024-11-20T17:21:16,798 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/205284ea80a0416baeb7da2ad8512d85 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/205284ea80a0416baeb7da2ad8512d85 2024-11-20T17:21:16,805 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/205284ea80a0416baeb7da2ad8512d85, entries=150, sequenceid=325, filesize=12.0 K 2024-11-20T17:21:16,806 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/3a759007ac604b19b51609fd62d238cd as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/3a759007ac604b19b51609fd62d238cd 2024-11-20T17:21:16,813 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/3a759007ac604b19b51609fd62d238cd, entries=150, sequenceid=325, filesize=12.0 K 2024-11-20T17:21:16,814 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=93.93 KB/96180 for d436a1ae301ec26cf78d29bd05a18bd2 in 114ms, sequenceid=325, compaction requested=true 2024-11-20T17:21:16,814 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:16,814 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:A, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:21:16,814 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:21:16,814 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:B, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:21:16,814 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T17:21:16,814 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:21:16,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-20T17:21:16,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on d436a1ae301ec26cf78d29bd05a18bd2 2024-11-20T17:21:16,818 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d436a1ae301ec26cf78d29bd05a18bd2 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-20T17:21:16,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=A 2024-11-20T17:21:16,819 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:16,819 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=B 2024-11-20T17:21:16,819 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:16,819 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=C 2024-11-20T17:21:16,819 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:16,834 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/6483edd9be424880bace3c4c612dff83 is 50, key is test_row_0/A:col10/1732123276803/Put/seqid=0 2024-11-20T17:21:16,860 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741907_1083 (size=14741) 2024-11-20T17:21:16,861 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=343 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/6483edd9be424880bace3c4c612dff83 2024-11-20T17:21:16,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:16,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123336868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:16,872 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:16,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123336870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:16,876 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/c341bc43f8ed4041922c7d211e51b217 is 50, key is test_row_0/B:col10/1732123276803/Put/seqid=0 2024-11-20T17:21:16,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741908_1084 (size=12301) 2024-11-20T17:21:16,910 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/83b55334c3654a02a04cfa676662e5a3 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/83b55334c3654a02a04cfa676662e5a3 2024-11-20T17:21:16,919 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d436a1ae301ec26cf78d29bd05a18bd2/A of d436a1ae301ec26cf78d29bd05a18bd2 into 83b55334c3654a02a04cfa676662e5a3(size=12.7 K), total size for store is 46.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:21:16,920 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:16,920 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2., storeName=d436a1ae301ec26cf78d29bd05a18bd2/A, priority=13, startTime=1732123276446; duration=0sec 2024-11-20T17:21:16,920 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-20T17:21:16,920 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:A 2024-11-20T17:21:16,920 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 4 compacting, 2 eligible, 16 blocking 2024-11-20T17:21:16,921 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-20T17:21:16,921 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-20T17:21:16,922 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. because compaction request was cancelled 2024-11-20T17:21:16,922 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:C 2024-11-20T17:21:16,922 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:A 2024-11-20T17:21:16,923 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:21:16,924 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:21:16,924 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): d436a1ae301ec26cf78d29bd05a18bd2/B is initiating minor compaction (all files) 2024-11-20T17:21:16,925 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d436a1ae301ec26cf78d29bd05a18bd2/B in TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 
2024-11-20T17:21:16,925 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/43c2d327c83b4c2c8f8dfb57a4d254d2, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/f17806586d264c3fb82f1948aa37a29a, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/205284ea80a0416baeb7da2ad8512d85] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp, totalSize=36.7 K 2024-11-20T17:21:16,925 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 43c2d327c83b4c2c8f8dfb57a4d254d2, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1732123275838 2024-11-20T17:21:16,926 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting f17806586d264c3fb82f1948aa37a29a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1732123276032 2024-11-20T17:21:16,926 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 205284ea80a0416baeb7da2ad8512d85, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732123276681 2024-11-20T17:21:16,949 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d436a1ae301ec26cf78d29bd05a18bd2#B#compaction#70 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:16,953 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/29d0b5c73719449284ac177fbac5dde1 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/29d0b5c73719449284ac177fbac5dde1 2024-11-20T17:21:16,962 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/7aa054b5f866455e932a61ba9b46f9d1 is 50, key is test_row_0/B:col10/1732123276698/Put/seqid=0 2024-11-20T17:21:16,963 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d436a1ae301ec26cf78d29bd05a18bd2/C of d436a1ae301ec26cf78d29bd05a18bd2 into 29d0b5c73719449284ac177fbac5dde1(size=12.6 K), total size for store is 36.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:21:16,963 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:16,963 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2., storeName=d436a1ae301ec26cf78d29bd05a18bd2/C, priority=12, startTime=1732123276447; duration=0sec 2024-11-20T17:21:16,964 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:21:16,964 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:C 2024-11-20T17:21:16,964 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:21:16,965 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 47349 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:21:16,966 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): d436a1ae301ec26cf78d29bd05a18bd2/A is initiating minor compaction (all files) 2024-11-20T17:21:16,966 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d436a1ae301ec26cf78d29bd05a18bd2/A in TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:16,966 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/83b55334c3654a02a04cfa676662e5a3, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/742de9f588ef4ec595f02fa68b809209, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/e2d52ea2226141e59c9bd3e0432357bb] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp, totalSize=46.2 K 2024-11-20T17:21:16,967 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 83b55334c3654a02a04cfa676662e5a3, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1732123275838 2024-11-20T17:21:16,967 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 742de9f588ef4ec595f02fa68b809209, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1732123276032 2024-11-20T17:21:16,968 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting e2d52ea2226141e59c9bd3e0432357bb, keycount=350, bloomtype=ROW, size=21.5 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732123276665 2024-11-20T17:21:16,974 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:16,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741909_1085 (size=13085) 2024-11-20T17:21:16,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123336973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:16,979 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:16,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123336976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:16,982 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d436a1ae301ec26cf78d29bd05a18bd2#A#compaction#71 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:16,983 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/e3277cac91dc4a14b66d9926404e0954 is 50, key is test_row_0/A:col10/1732123276698/Put/seqid=0 2024-11-20T17:21:16,985 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/7aa054b5f866455e932a61ba9b46f9d1 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/7aa054b5f866455e932a61ba9b46f9d1 2024-11-20T17:21:16,995 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d436a1ae301ec26cf78d29bd05a18bd2/B of d436a1ae301ec26cf78d29bd05a18bd2 into 7aa054b5f866455e932a61ba9b46f9d1(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:21:16,995 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:16,995 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2., storeName=d436a1ae301ec26cf78d29bd05a18bd2/B, priority=13, startTime=1732123276814; duration=0sec 2024-11-20T17:21:16,995 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:16,995 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:B 2024-11-20T17:21:17,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741910_1086 (size=13085) 2024-11-20T17:21:17,015 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/e3277cac91dc4a14b66d9926404e0954 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/e3277cac91dc4a14b66d9926404e0954 2024-11-20T17:21:17,024 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d436a1ae301ec26cf78d29bd05a18bd2/A of d436a1ae301ec26cf78d29bd05a18bd2 into e3277cac91dc4a14b66d9926404e0954(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:21:17,024 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:17,024 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2., storeName=d436a1ae301ec26cf78d29bd05a18bd2/A, priority=13, startTime=1732123276814; duration=0sec 2024-11-20T17:21:17,025 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:17,025 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:A 2024-11-20T17:21:17,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T17:21:17,158 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-11-20T17:21:17,160 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:21:17,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-11-20T17:21:17,163 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:21:17,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T17:21:17,164 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:21:17,164 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:21:17,180 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:17,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123337179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:17,182 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:17,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123337181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:17,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T17:21:17,291 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=343 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/c341bc43f8ed4041922c7d211e51b217 2024-11-20T17:21:17,305 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/be11974f3c2b430a8e145cb883e5df07 is 50, key is test_row_0/C:col10/1732123276803/Put/seqid=0 2024-11-20T17:21:17,316 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:17,317 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T17:21:17,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:17,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. as already flushing 2024-11-20T17:21:17,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:17,318 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:17,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:17,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:17,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741911_1087 (size=12301) 2024-11-20T17:21:17,340 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=343 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/be11974f3c2b430a8e145cb883e5df07 2024-11-20T17:21:17,347 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/6483edd9be424880bace3c4c612dff83 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/6483edd9be424880bace3c4c612dff83 2024-11-20T17:21:17,353 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/6483edd9be424880bace3c4c612dff83, entries=200, sequenceid=343, filesize=14.4 K 2024-11-20T17:21:17,354 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/c341bc43f8ed4041922c7d211e51b217 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/c341bc43f8ed4041922c7d211e51b217 2024-11-20T17:21:17,360 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/c341bc43f8ed4041922c7d211e51b217, entries=150, sequenceid=343, filesize=12.0 K 2024-11-20T17:21:17,361 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/be11974f3c2b430a8e145cb883e5df07 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/be11974f3c2b430a8e145cb883e5df07 2024-11-20T17:21:17,369 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/be11974f3c2b430a8e145cb883e5df07, entries=150, sequenceid=343, filesize=12.0 K 2024-11-20T17:21:17,370 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for d436a1ae301ec26cf78d29bd05a18bd2 in 552ms, sequenceid=343, compaction requested=true 2024-11-20T17:21:17,370 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:17,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:21:17,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:17,371 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-20T17:21:17,371 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-20T17:21:17,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:21:17,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:17,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:21:17,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:21:17,372 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-20T17:21:17,372 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files 
ready for compaction. Need 3 to initiate. 2024-11-20T17:21:17,372 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. because compaction request was cancelled 2024-11-20T17:21:17,372 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:A 2024-11-20T17:21:17,372 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:21:17,372 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-20T17:21:17,372 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-20T17:21:17,372 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. because compaction request was cancelled 2024-11-20T17:21:17,372 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:B 2024-11-20T17:21:17,374 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49852 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:21:17,374 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): d436a1ae301ec26cf78d29bd05a18bd2/C is initiating minor compaction (all files) 2024-11-20T17:21:17,374 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d436a1ae301ec26cf78d29bd05a18bd2/C in TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 
2024-11-20T17:21:17,374 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/29d0b5c73719449284ac177fbac5dde1, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/b8a7c0a49b434533b3f6358b66193811, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/3a759007ac604b19b51609fd62d238cd, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/be11974f3c2b430a8e145cb883e5df07] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp, totalSize=48.7 K 2024-11-20T17:21:17,375 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 29d0b5c73719449284ac177fbac5dde1, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1732123275838 2024-11-20T17:21:17,375 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting b8a7c0a49b434533b3f6358b66193811, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1732123276032 2024-11-20T17:21:17,376 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3a759007ac604b19b51609fd62d238cd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732123276681 2024-11-20T17:21:17,376 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting be11974f3c2b430a8e145cb883e5df07, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1732123276783 2024-11-20T17:21:17,393 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d436a1ae301ec26cf78d29bd05a18bd2#C#compaction#73 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:17,394 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/efd7fb383f1040868bc39ad86ccdf215 is 50, key is test_row_0/C:col10/1732123276803/Put/seqid=0 2024-11-20T17:21:17,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741912_1088 (size=13085) 2024-11-20T17:21:17,422 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/efd7fb383f1040868bc39ad86ccdf215 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/efd7fb383f1040868bc39ad86ccdf215 2024-11-20T17:21:17,432 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d436a1ae301ec26cf78d29bd05a18bd2/C of d436a1ae301ec26cf78d29bd05a18bd2 into efd7fb383f1040868bc39ad86ccdf215(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:21:17,432 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:17,432 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2., storeName=d436a1ae301ec26cf78d29bd05a18bd2/C, priority=12, startTime=1732123277371; duration=0sec 2024-11-20T17:21:17,432 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:17,432 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:C 2024-11-20T17:21:17,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T17:21:17,470 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:17,471 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T17:21:17,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 
2024-11-20T17:21:17,472 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing d436a1ae301ec26cf78d29bd05a18bd2 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-20T17:21:17,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=A 2024-11-20T17:21:17,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:17,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=B 2024-11-20T17:21:17,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:17,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=C 2024-11-20T17:21:17,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:17,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/07578e696ef546cfb8fd81d6ad72d5f2 is 50, key is test_row_0/A:col10/1732123276843/Put/seqid=0 2024-11-20T17:21:17,489 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. as already flushing 2024-11-20T17:21:17,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on d436a1ae301ec26cf78d29bd05a18bd2 2024-11-20T17:21:17,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741913_1089 (size=12301) 2024-11-20T17:21:17,517 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:17,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123337515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:17,519 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:17,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123337519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:17,620 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:17,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123337620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:17,626 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:17,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123337623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:17,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T17:21:17,823 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:17,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123337822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:17,829 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:17,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123337828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:17,908 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/07578e696ef546cfb8fd81d6ad72d5f2 2024-11-20T17:21:17,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/85a19cd7a7504fdcbd64f9799acae102 is 50, key is test_row_0/B:col10/1732123276843/Put/seqid=0 2024-11-20T17:21:17,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741914_1090 (size=12301) 2024-11-20T17:21:17,952 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/85a19cd7a7504fdcbd64f9799acae102 2024-11-20T17:21:17,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/e7de7ce26ad24469b5d8666f90970233 is 50, key is test_row_0/C:col10/1732123276843/Put/seqid=0 2024-11-20T17:21:17,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741915_1091 (size=12301) 2024-11-20T17:21:18,128 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:18,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123338125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:18,135 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:18,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123338135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:18,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T17:21:18,383 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/e7de7ce26ad24469b5d8666f90970233 2024-11-20T17:21:18,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/07578e696ef546cfb8fd81d6ad72d5f2 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/07578e696ef546cfb8fd81d6ad72d5f2 2024-11-20T17:21:18,397 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/07578e696ef546cfb8fd81d6ad72d5f2, entries=150, sequenceid=366, filesize=12.0 K 2024-11-20T17:21:18,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/85a19cd7a7504fdcbd64f9799acae102 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/85a19cd7a7504fdcbd64f9799acae102 2024-11-20T17:21:18,406 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/85a19cd7a7504fdcbd64f9799acae102, entries=150, sequenceid=366, filesize=12.0 K 2024-11-20T17:21:18,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/e7de7ce26ad24469b5d8666f90970233 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/e7de7ce26ad24469b5d8666f90970233 2024-11-20T17:21:18,415 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/e7de7ce26ad24469b5d8666f90970233, entries=150, sequenceid=366, filesize=12.0 K 2024-11-20T17:21:18,417 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for d436a1ae301ec26cf78d29bd05a18bd2 in 946ms, sequenceid=366, compaction requested=true 2024-11-20T17:21:18,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:18,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:18,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-11-20T17:21:18,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-11-20T17:21:18,422 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-11-20T17:21:18,422 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2560 sec 2024-11-20T17:21:18,424 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 1.2630 sec 2024-11-20T17:21:18,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on d436a1ae301ec26cf78d29bd05a18bd2 2024-11-20T17:21:18,633 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d436a1ae301ec26cf78d29bd05a18bd2 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-20T17:21:18,633 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=A 2024-11-20T17:21:18,633 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:18,633 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=B 2024-11-20T17:21:18,634 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:18,634 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=C 2024-11-20T17:21:18,634 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:18,640 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/a423e8be77b744b985544a3808f085f6 is 50, key is test_row_0/A:col10/1732123278631/Put/seqid=0 2024-11-20T17:21:18,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741916_1092 (size=14741) 2024-11-20T17:21:18,697 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:18,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123338694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:18,698 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:18,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123338696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:18,800 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:18,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123338799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:18,801 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:18,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123338799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:19,004 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:19,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123339002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:19,004 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:19,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123339003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:19,054 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=385 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/a423e8be77b744b985544a3808f085f6 2024-11-20T17:21:19,067 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/df9b57b011784809bdee989f502b6c8e is 50, key is test_row_0/B:col10/1732123278631/Put/seqid=0 2024-11-20T17:21:19,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741917_1093 (size=12301) 2024-11-20T17:21:19,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T17:21:19,270 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-11-20T17:21:19,272 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:21:19,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-11-20T17:21:19,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 
2024-11-20T17:21:19,274 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:21:19,275 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:21:19,275 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:21:19,307 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:19,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123339306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:19,307 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:19,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123339307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:19,359 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:19,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47542 deadline: 1732123339357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:19,360 DEBUG [Thread-155 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8167 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2., hostname=d514dc944523,40121,1732123262111, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:21:19,369 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:19,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47566 deadline: 1732123339369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:19,370 DEBUG [Thread-153 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8177 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2., hostname=d514dc944523,40121,1732123262111, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:21:19,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T17:21:19,377 WARN 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:19,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47536 deadline: 1732123339376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:19,379 DEBUG [Thread-149 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8189 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2., hostname=d514dc944523,40121,1732123262111, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at 
org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at 
org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:21:19,427 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:19,428 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-20T17:21:19,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:19,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. as already flushing 2024-11-20T17:21:19,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:19,429 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:19,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:19,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:21:19,477 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=385 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/df9b57b011784809bdee989f502b6c8e 2024-11-20T17:21:19,487 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/82b77a6aaccb4feea9263e1281e81fc2 is 50, key is test_row_0/C:col10/1732123278631/Put/seqid=0 2024-11-20T17:21:19,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741918_1094 (size=12301) 2024-11-20T17:21:19,500 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=385 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/82b77a6aaccb4feea9263e1281e81fc2 2024-11-20T17:21:19,507 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/a423e8be77b744b985544a3808f085f6 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/a423e8be77b744b985544a3808f085f6 2024-11-20T17:21:19,513 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/a423e8be77b744b985544a3808f085f6, entries=200, sequenceid=385, filesize=14.4 K 2024-11-20T17:21:19,515 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/df9b57b011784809bdee989f502b6c8e as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/df9b57b011784809bdee989f502b6c8e 2024-11-20T17:21:19,522 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/df9b57b011784809bdee989f502b6c8e, entries=150, sequenceid=385, filesize=12.0 K 2024-11-20T17:21:19,523 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/82b77a6aaccb4feea9263e1281e81fc2 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/82b77a6aaccb4feea9263e1281e81fc2 2024-11-20T17:21:19,529 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/82b77a6aaccb4feea9263e1281e81fc2, entries=150, sequenceid=385, filesize=12.0 K 2024-11-20T17:21:19,532 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for d436a1ae301ec26cf78d29bd05a18bd2 in 899ms, sequenceid=385, compaction requested=true 2024-11-20T17:21:19,532 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:19,532 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:21:19,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:21:19,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:19,533 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:21:19,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:21:19,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:19,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:21:19,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:21:19,535 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 54868 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:21:19,535 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): d436a1ae301ec26cf78d29bd05a18bd2/A is initiating minor compaction (all files) 2024-11-20T17:21:19,535 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d436a1ae301ec26cf78d29bd05a18bd2/A in TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 
2024-11-20T17:21:19,535 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/e3277cac91dc4a14b66d9926404e0954, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/6483edd9be424880bace3c4c612dff83, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/07578e696ef546cfb8fd81d6ad72d5f2, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/a423e8be77b744b985544a3808f085f6] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp, totalSize=53.6 K 2024-11-20T17:21:19,536 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49988 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:21:19,536 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): d436a1ae301ec26cf78d29bd05a18bd2/B is initiating minor compaction (all files) 2024-11-20T17:21:19,536 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d436a1ae301ec26cf78d29bd05a18bd2/B in TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:19,536 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/7aa054b5f866455e932a61ba9b46f9d1, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/c341bc43f8ed4041922c7d211e51b217, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/85a19cd7a7504fdcbd64f9799acae102, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/df9b57b011784809bdee989f502b6c8e] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp, totalSize=48.8 K 2024-11-20T17:21:19,537 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting e3277cac91dc4a14b66d9926404e0954, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732123276681 2024-11-20T17:21:19,537 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 7aa054b5f866455e932a61ba9b46f9d1, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732123276681 2024-11-20T17:21:19,537 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting c341bc43f8ed4041922c7d211e51b217, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=343, 
earliestPutTs=1732123276783 2024-11-20T17:21:19,538 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6483edd9be424880bace3c4c612dff83, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1732123276783 2024-11-20T17:21:19,538 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 85a19cd7a7504fdcbd64f9799acae102, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1732123276843 2024-11-20T17:21:19,538 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 07578e696ef546cfb8fd81d6ad72d5f2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1732123276843 2024-11-20T17:21:19,538 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting df9b57b011784809bdee989f502b6c8e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=385, earliestPutTs=1732123277507 2024-11-20T17:21:19,539 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting a423e8be77b744b985544a3808f085f6, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=385, earliestPutTs=1732123277507 2024-11-20T17:21:19,552 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d436a1ae301ec26cf78d29bd05a18bd2#A#compaction#80 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:19,553 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d436a1ae301ec26cf78d29bd05a18bd2#B#compaction#81 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:19,553 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/9bd615511bda4a268c0371a7698cfd55 is 50, key is test_row_0/A:col10/1732123278631/Put/seqid=0 2024-11-20T17:21:19,554 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/59c21e97516a49728010d8e2de8f2511 is 50, key is test_row_0/B:col10/1732123278631/Put/seqid=0 2024-11-20T17:21:19,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741919_1095 (size=13221) 2024-11-20T17:21:19,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741920_1096 (size=13221) 2024-11-20T17:21:19,573 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/59c21e97516a49728010d8e2de8f2511 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/59c21e97516a49728010d8e2de8f2511 2024-11-20T17:21:19,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T17:21:19,580 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d436a1ae301ec26cf78d29bd05a18bd2/B of d436a1ae301ec26cf78d29bd05a18bd2 into 59c21e97516a49728010d8e2de8f2511(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:21:19,580 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:19,580 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2., storeName=d436a1ae301ec26cf78d29bd05a18bd2/B, priority=12, startTime=1732123279533; duration=0sec 2024-11-20T17:21:19,580 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:21:19,581 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:B 2024-11-20T17:21:19,581 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:21:19,581 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:19,582 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-20T17:21:19,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:19,582 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing d436a1ae301ec26cf78d29bd05a18bd2 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-20T17:21:19,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=A 2024-11-20T17:21:19,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:19,583 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:21:19,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=B 2024-11-20T17:21:19,583 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): d436a1ae301ec26cf78d29bd05a18bd2/C is initiating minor compaction (all files) 2024-11-20T17:21:19,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:19,583 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d436a1ae301ec26cf78d29bd05a18bd2/C in TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 
2024-11-20T17:21:19,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=C 2024-11-20T17:21:19,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:19,583 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/efd7fb383f1040868bc39ad86ccdf215, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/e7de7ce26ad24469b5d8666f90970233, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/82b77a6aaccb4feea9263e1281e81fc2] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp, totalSize=36.8 K 2024-11-20T17:21:19,583 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting efd7fb383f1040868bc39ad86ccdf215, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1732123276783 2024-11-20T17:21:19,584 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting e7de7ce26ad24469b5d8666f90970233, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1732123276843 2024-11-20T17:21:19,585 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 82b77a6aaccb4feea9263e1281e81fc2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=385, earliestPutTs=1732123277507 2024-11-20T17:21:19,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/1456a3a49ac34efea63ca64ed5f91c59 is 50, key is test_row_0/A:col10/1732123278690/Put/seqid=0 2024-11-20T17:21:19,598 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d436a1ae301ec26cf78d29bd05a18bd2#C#compaction#83 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:19,598 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/6e6542e4f6944ac593934f7589fc464e is 50, key is test_row_0/C:col10/1732123278631/Put/seqid=0 2024-11-20T17:21:19,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741921_1097 (size=12301) 2024-11-20T17:21:19,602 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=403 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/1456a3a49ac34efea63ca64ed5f91c59 2024-11-20T17:21:19,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/b02629e3a7c349ccbf91f120dfd63b05 is 50, key is test_row_0/B:col10/1732123278690/Put/seqid=0 2024-11-20T17:21:19,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741922_1098 (size=13187) 2024-11-20T17:21:19,654 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/6e6542e4f6944ac593934f7589fc464e as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/6e6542e4f6944ac593934f7589fc464e 2024-11-20T17:21:19,663 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d436a1ae301ec26cf78d29bd05a18bd2/C of d436a1ae301ec26cf78d29bd05a18bd2 into 6e6542e4f6944ac593934f7589fc464e(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:21:19,663 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:19,663 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2., storeName=d436a1ae301ec26cf78d29bd05a18bd2/C, priority=13, startTime=1732123279533; duration=0sec 2024-11-20T17:21:19,663 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:19,663 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:C 2024-11-20T17:21:19,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741923_1099 (size=12301) 2024-11-20T17:21:19,693 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=403 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/b02629e3a7c349ccbf91f120dfd63b05 2024-11-20T17:21:19,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/72979612bb9840e9a5fca97f5a15ed19 is 50, key is test_row_0/C:col10/1732123278690/Put/seqid=0 2024-11-20T17:21:19,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741924_1100 (size=12301) 2024-11-20T17:21:19,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on d436a1ae301ec26cf78d29bd05a18bd2 2024-11-20T17:21:19,813 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. as already flushing 2024-11-20T17:21:19,836 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:19,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123339835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:19,838 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:19,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123339836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:19,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T17:21:19,938 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:19,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123339938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:19,941 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:19,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123339940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:19,968 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/9bd615511bda4a268c0371a7698cfd55 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/9bd615511bda4a268c0371a7698cfd55 2024-11-20T17:21:19,975 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d436a1ae301ec26cf78d29bd05a18bd2/A of d436a1ae301ec26cf78d29bd05a18bd2 into 9bd615511bda4a268c0371a7698cfd55(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:21:19,975 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:19,975 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2., storeName=d436a1ae301ec26cf78d29bd05a18bd2/A, priority=12, startTime=1732123279532; duration=0sec 2024-11-20T17:21:19,976 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:19,976 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:A 2024-11-20T17:21:20,116 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=403 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/72979612bb9840e9a5fca97f5a15ed19 2024-11-20T17:21:20,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/1456a3a49ac34efea63ca64ed5f91c59 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/1456a3a49ac34efea63ca64ed5f91c59 2024-11-20T17:21:20,129 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/1456a3a49ac34efea63ca64ed5f91c59, entries=150, sequenceid=403, filesize=12.0 K 2024-11-20T17:21:20,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/b02629e3a7c349ccbf91f120dfd63b05 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/b02629e3a7c349ccbf91f120dfd63b05 2024-11-20T17:21:20,136 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/b02629e3a7c349ccbf91f120dfd63b05, entries=150, sequenceid=403, filesize=12.0 K 2024-11-20T17:21:20,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/72979612bb9840e9a5fca97f5a15ed19 
as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/72979612bb9840e9a5fca97f5a15ed19 2024-11-20T17:21:20,141 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:20,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123340140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:20,143 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:20,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123340143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:20,145 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/72979612bb9840e9a5fca97f5a15ed19, entries=150, sequenceid=403, filesize=12.0 K 2024-11-20T17:21:20,146 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for d436a1ae301ec26cf78d29bd05a18bd2 in 564ms, sequenceid=403, compaction requested=false 2024-11-20T17:21:20,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:20,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 
2024-11-20T17:21:20,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-11-20T17:21:20,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-11-20T17:21:20,150 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-11-20T17:21:20,150 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 873 msec 2024-11-20T17:21:20,151 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 878 msec 2024-11-20T17:21:20,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T17:21:20,382 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-11-20T17:21:20,385 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:21:20,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-11-20T17:21:20,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T17:21:20,387 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:21:20,389 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:21:20,389 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:21:20,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on d436a1ae301ec26cf78d29bd05a18bd2 2024-11-20T17:21:20,451 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d436a1ae301ec26cf78d29bd05a18bd2 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-20T17:21:20,452 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=A 2024-11-20T17:21:20,452 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:20,452 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=B 2024-11-20T17:21:20,452 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-20T17:21:20,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=C 2024-11-20T17:21:20,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:20,458 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/f678be0fa2514abfa68a6135ff608982 is 50, key is test_row_0/A:col10/1732123280448/Put/seqid=0 2024-11-20T17:21:20,473 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:20,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123340472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:20,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741925_1101 (size=14741) 2024-11-20T17:21:20,475 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=426 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/f678be0fa2514abfa68a6135ff608982 2024-11-20T17:21:20,476 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:20,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123340473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:20,486 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/0c7f794644204c31baf8917ffe9c4d6b is 50, key is test_row_0/B:col10/1732123280448/Put/seqid=0 2024-11-20T17:21:20,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T17:21:20,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741926_1102 (size=12301) 2024-11-20T17:21:20,502 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=426 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/0c7f794644204c31baf8917ffe9c4d6b 2024-11-20T17:21:20,517 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/096598ba46174ca8a539f1e447eb1889 is 50, key is test_row_0/C:col10/1732123280448/Put/seqid=0 2024-11-20T17:21:20,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741927_1103 (size=12301) 2024-11-20T17:21:20,540 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=426 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/096598ba46174ca8a539f1e447eb1889 2024-11-20T17:21:20,542 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 
2024-11-20T17:21:20,543 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-20T17:21:20,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:20,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. as already flushing 2024-11-20T17:21:20,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:20,544 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:20,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:21:20,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:21:20,550 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/f678be0fa2514abfa68a6135ff608982 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/f678be0fa2514abfa68a6135ff608982 2024-11-20T17:21:20,559 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/f678be0fa2514abfa68a6135ff608982, entries=200, sequenceid=426, filesize=14.4 K 2024-11-20T17:21:20,562 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/0c7f794644204c31baf8917ffe9c4d6b as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/0c7f794644204c31baf8917ffe9c4d6b 2024-11-20T17:21:20,572 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/0c7f794644204c31baf8917ffe9c4d6b, entries=150, sequenceid=426, filesize=12.0 K 2024-11-20T17:21:20,573 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/096598ba46174ca8a539f1e447eb1889 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/096598ba46174ca8a539f1e447eb1889 2024-11-20T17:21:20,577 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:20,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123340575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:20,581 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/096598ba46174ca8a539f1e447eb1889, entries=150, sequenceid=426, filesize=12.0 K 2024-11-20T17:21:20,582 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:20,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123340581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:20,583 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for d436a1ae301ec26cf78d29bd05a18bd2 in 133ms, sequenceid=426, compaction requested=true 2024-11-20T17:21:20,583 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:20,583 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:21:20,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:21:20,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:20,584 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:21:20,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:21:20,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:20,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:21:20,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:21:20,585 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40263 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:21:20,585 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 
permutations with 1 in ratio 2024-11-20T17:21:20,586 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): d436a1ae301ec26cf78d29bd05a18bd2/A is initiating minor compaction (all files) 2024-11-20T17:21:20,586 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): d436a1ae301ec26cf78d29bd05a18bd2/B is initiating minor compaction (all files) 2024-11-20T17:21:20,586 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d436a1ae301ec26cf78d29bd05a18bd2/A in TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:20,586 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d436a1ae301ec26cf78d29bd05a18bd2/B in TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:20,586 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/9bd615511bda4a268c0371a7698cfd55, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/1456a3a49ac34efea63ca64ed5f91c59, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/f678be0fa2514abfa68a6135ff608982] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp, totalSize=39.3 K 2024-11-20T17:21:20,586 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/59c21e97516a49728010d8e2de8f2511, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/b02629e3a7c349ccbf91f120dfd63b05, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/0c7f794644204c31baf8917ffe9c4d6b] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp, totalSize=36.9 K 2024-11-20T17:21:20,586 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9bd615511bda4a268c0371a7698cfd55, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=385, earliestPutTs=1732123277507 2024-11-20T17:21:20,586 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 59c21e97516a49728010d8e2de8f2511, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=385, earliestPutTs=1732123277507 2024-11-20T17:21:20,587 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1456a3a49ac34efea63ca64ed5f91c59, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=403, earliestPutTs=1732123278690 2024-11-20T17:21:20,587 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting b02629e3a7c349ccbf91f120dfd63b05, keycount=150, 
bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=403, earliestPutTs=1732123278690 2024-11-20T17:21:20,588 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c7f794644204c31baf8917ffe9c4d6b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=426, earliestPutTs=1732123279834 2024-11-20T17:21:20,588 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting f678be0fa2514abfa68a6135ff608982, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=426, earliestPutTs=1732123279830 2024-11-20T17:21:20,599 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d436a1ae301ec26cf78d29bd05a18bd2#A#compaction#89 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:20,600 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/a876dd9fe3af40c080008bb1fb32ac99 is 50, key is test_row_0/A:col10/1732123280448/Put/seqid=0 2024-11-20T17:21:20,603 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d436a1ae301ec26cf78d29bd05a18bd2#B#compaction#90 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:20,604 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/289338eddda4495bbd493a7d9e318c7d is 50, key is test_row_0/B:col10/1732123280448/Put/seqid=0 2024-11-20T17:21:20,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741928_1104 (size=13323) 2024-11-20T17:21:20,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741929_1105 (size=13323) 2024-11-20T17:21:20,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T17:21:20,697 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:20,698 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-20T17:21:20,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 
2024-11-20T17:21:20,698 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing d436a1ae301ec26cf78d29bd05a18bd2 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T17:21:20,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=A 2024-11-20T17:21:20,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:20,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=B 2024-11-20T17:21:20,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:20,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=C 2024-11-20T17:21:20,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:20,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/2804cd3b15024815839b63ebd0d38071 is 50, key is test_row_0/A:col10/1732123280465/Put/seqid=0 2024-11-20T17:21:20,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741930_1106 (size=12301) 2024-11-20T17:21:20,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on d436a1ae301ec26cf78d29bd05a18bd2 2024-11-20T17:21:20,796 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. as already flushing 2024-11-20T17:21:20,828 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:20,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123340827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:20,830 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:20,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123340828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:20,930 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:20,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123340929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:20,934 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:20,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123340931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:20,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T17:21:21,028 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/a876dd9fe3af40c080008bb1fb32ac99 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/a876dd9fe3af40c080008bb1fb32ac99 2024-11-20T17:21:21,038 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d436a1ae301ec26cf78d29bd05a18bd2/A of d436a1ae301ec26cf78d29bd05a18bd2 into a876dd9fe3af40c080008bb1fb32ac99(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:21:21,038 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:21,038 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2., storeName=d436a1ae301ec26cf78d29bd05a18bd2/A, priority=13, startTime=1732123280583; duration=0sec 2024-11-20T17:21:21,038 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/289338eddda4495bbd493a7d9e318c7d as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/289338eddda4495bbd493a7d9e318c7d 2024-11-20T17:21:21,039 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:21:21,039 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:A 2024-11-20T17:21:21,040 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:21:21,041 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:21:21,041 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): d436a1ae301ec26cf78d29bd05a18bd2/C is initiating minor compaction (all files) 2024-11-20T17:21:21,041 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d436a1ae301ec26cf78d29bd05a18bd2/C in TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 
2024-11-20T17:21:21,041 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/6e6542e4f6944ac593934f7589fc464e, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/72979612bb9840e9a5fca97f5a15ed19, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/096598ba46174ca8a539f1e447eb1889] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp, totalSize=36.9 K 2024-11-20T17:21:21,042 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6e6542e4f6944ac593934f7589fc464e, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=385, earliestPutTs=1732123277507 2024-11-20T17:21:21,042 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 72979612bb9840e9a5fca97f5a15ed19, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=403, earliestPutTs=1732123278690 2024-11-20T17:21:21,043 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 096598ba46174ca8a539f1e447eb1889, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=426, earliestPutTs=1732123279834 2024-11-20T17:21:21,046 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d436a1ae301ec26cf78d29bd05a18bd2/B of d436a1ae301ec26cf78d29bd05a18bd2 into 289338eddda4495bbd493a7d9e318c7d(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:21:21,046 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:21,046 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2., storeName=d436a1ae301ec26cf78d29bd05a18bd2/B, priority=13, startTime=1732123280584; duration=0sec 2024-11-20T17:21:21,046 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:21,046 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:B 2024-11-20T17:21:21,052 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d436a1ae301ec26cf78d29bd05a18bd2#C#compaction#92 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:21,052 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/f904697d903244ca94a8c78b9015ec33 is 50, key is test_row_0/C:col10/1732123280448/Put/seqid=0 2024-11-20T17:21:21,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741931_1107 (size=13289) 2024-11-20T17:21:21,119 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=441 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/2804cd3b15024815839b63ebd0d38071 2024-11-20T17:21:21,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/e7c93b96fe5b49929375dd85424a585d is 50, key is test_row_0/B:col10/1732123280465/Put/seqid=0 2024-11-20T17:21:21,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741932_1108 (size=12301) 2024-11-20T17:21:21,134 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=441 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/e7c93b96fe5b49929375dd85424a585d 2024-11-20T17:21:21,135 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:21,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123341134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:21,138 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:21,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123341136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:21,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/0de2b7dfc704455c9ef45d241e962888 is 50, key is test_row_0/C:col10/1732123280465/Put/seqid=0 2024-11-20T17:21:21,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741933_1109 (size=12301) 2024-11-20T17:21:21,437 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:21,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123341437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:21,442 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:21,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123341441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:21,466 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/f904697d903244ca94a8c78b9015ec33 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/f904697d903244ca94a8c78b9015ec33 2024-11-20T17:21:21,473 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d436a1ae301ec26cf78d29bd05a18bd2/C of d436a1ae301ec26cf78d29bd05a18bd2 into f904697d903244ca94a8c78b9015ec33(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:21:21,473 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:21,473 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2., storeName=d436a1ae301ec26cf78d29bd05a18bd2/C, priority=13, startTime=1732123280584; duration=0sec 2024-11-20T17:21:21,473 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:21,473 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:C 2024-11-20T17:21:21,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T17:21:21,549 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=441 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/0de2b7dfc704455c9ef45d241e962888 2024-11-20T17:21:21,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/2804cd3b15024815839b63ebd0d38071 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/2804cd3b15024815839b63ebd0d38071 2024-11-20T17:21:21,563 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/2804cd3b15024815839b63ebd0d38071, entries=150, sequenceid=441, filesize=12.0 K 2024-11-20T17:21:21,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/e7c93b96fe5b49929375dd85424a585d as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/e7c93b96fe5b49929375dd85424a585d 2024-11-20T17:21:21,570 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/e7c93b96fe5b49929375dd85424a585d, entries=150, sequenceid=441, filesize=12.0 K 2024-11-20T17:21:21,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/0de2b7dfc704455c9ef45d241e962888 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/0de2b7dfc704455c9ef45d241e962888 2024-11-20T17:21:21,576 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/0de2b7dfc704455c9ef45d241e962888, entries=150, sequenceid=441, filesize=12.0 K 2024-11-20T17:21:21,578 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for d436a1ae301ec26cf78d29bd05a18bd2 in 879ms, sequenceid=441, compaction requested=false 2024-11-20T17:21:21,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:21,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:21,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27 2024-11-20T17:21:21,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=27 2024-11-20T17:21:21,582 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-11-20T17:21:21,582 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1910 sec 2024-11-20T17:21:21,585 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 1.1980 sec 2024-11-20T17:21:21,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on d436a1ae301ec26cf78d29bd05a18bd2 2024-11-20T17:21:21,941 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d436a1ae301ec26cf78d29bd05a18bd2 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T17:21:21,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=A 2024-11-20T17:21:21,942 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:21,942 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=B 2024-11-20T17:21:21,942 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:21,942 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
d436a1ae301ec26cf78d29bd05a18bd2, store=C 2024-11-20T17:21:21,942 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:21,947 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/e701033ab6d24f2f83b8755bcf94766e is 50, key is test_row_0/A:col10/1732123281939/Put/seqid=0 2024-11-20T17:21:21,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741934_1110 (size=12301) 2024-11-20T17:21:21,961 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:21,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 260 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123341960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:21,962 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:21,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 255 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123341961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:22,063 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:22,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 262 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123342063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:22,066 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:22,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 257 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123342064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:22,267 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:22,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 264 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123342265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:22,268 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:22,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 259 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123342268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:22,357 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=467 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/e701033ab6d24f2f83b8755bcf94766e 2024-11-20T17:21:22,369 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/492ed36b379e44ae82b029d10ef43c1c is 50, key is test_row_0/B:col10/1732123281939/Put/seqid=0 2024-11-20T17:21:22,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741935_1111 (size=12301) 2024-11-20T17:21:22,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T17:21:22,491 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-11-20T17:21:22,493 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:21:22,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees 2024-11-20T17:21:22,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 
2024-11-20T17:21:22,495 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:21:22,495 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:21:22,496 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:21:22,569 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:22,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 266 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123342568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:22,572 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:22,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123342571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:22,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T17:21:22,647 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:22,648 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-20T17:21:22,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:22,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. as already flushing 2024-11-20T17:21:22,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:22,648 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:21:22,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:22,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:22,777 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=467 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/492ed36b379e44ae82b029d10ef43c1c 2024-11-20T17:21:22,787 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/babfa8d496e8441c95ae6a1c0b75397b is 50, key is test_row_0/C:col10/1732123281939/Put/seqid=0 2024-11-20T17:21:22,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741936_1112 (size=12301) 2024-11-20T17:21:22,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T17:21:22,801 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:22,801 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-20T17:21:22,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:22,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. as already flushing 2024-11-20T17:21:22,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:22,802 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:22,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:22,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:22,954 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:22,954 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-20T17:21:22,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:22,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. as already flushing 2024-11-20T17:21:22,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:22,955 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:22,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:22,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:23,076 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:23,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 268 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123343073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:23,076 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:23,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123343073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:23,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T17:21:23,107 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:23,108 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-20T17:21:23,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:23,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. as already flushing 2024-11-20T17:21:23,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:23,108 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:21:23,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:23,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:23,195 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=467 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/babfa8d496e8441c95ae6a1c0b75397b 2024-11-20T17:21:23,201 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/e701033ab6d24f2f83b8755bcf94766e as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/e701033ab6d24f2f83b8755bcf94766e 2024-11-20T17:21:23,206 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/e701033ab6d24f2f83b8755bcf94766e, entries=150, sequenceid=467, filesize=12.0 K 2024-11-20T17:21:23,208 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/492ed36b379e44ae82b029d10ef43c1c as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/492ed36b379e44ae82b029d10ef43c1c 2024-11-20T17:21:23,213 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/492ed36b379e44ae82b029d10ef43c1c, entries=150, sequenceid=467, filesize=12.0 K 2024-11-20T17:21:23,214 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/babfa8d496e8441c95ae6a1c0b75397b as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/babfa8d496e8441c95ae6a1c0b75397b 2024-11-20T17:21:23,218 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/babfa8d496e8441c95ae6a1c0b75397b, entries=150, sequenceid=467, filesize=12.0 K 2024-11-20T17:21:23,219 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for d436a1ae301ec26cf78d29bd05a18bd2 in 1278ms, sequenceid=467, compaction requested=true 2024-11-20T17:21:23,219 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:23,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
d436a1ae301ec26cf78d29bd05a18bd2:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:21:23,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:23,220 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:21:23,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:21:23,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:21:23,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:21:23,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T17:21:23,220 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:21:23,221 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37925 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:21:23,221 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37925 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:21:23,222 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): d436a1ae301ec26cf78d29bd05a18bd2/A is initiating minor compaction (all files) 2024-11-20T17:21:23,222 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): d436a1ae301ec26cf78d29bd05a18bd2/B is initiating minor compaction (all files) 2024-11-20T17:21:23,222 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d436a1ae301ec26cf78d29bd05a18bd2/A in TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:23,222 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d436a1ae301ec26cf78d29bd05a18bd2/B in TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 
2024-11-20T17:21:23,222 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/a876dd9fe3af40c080008bb1fb32ac99, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/2804cd3b15024815839b63ebd0d38071, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/e701033ab6d24f2f83b8755bcf94766e] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp, totalSize=37.0 K 2024-11-20T17:21:23,222 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/289338eddda4495bbd493a7d9e318c7d, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/e7c93b96fe5b49929375dd85424a585d, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/492ed36b379e44ae82b029d10ef43c1c] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp, totalSize=37.0 K 2024-11-20T17:21:23,222 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting a876dd9fe3af40c080008bb1fb32ac99, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=426, earliestPutTs=1732123279834 2024-11-20T17:21:23,222 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 289338eddda4495bbd493a7d9e318c7d, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=426, earliestPutTs=1732123279834 2024-11-20T17:21:23,223 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting e7c93b96fe5b49929375dd85424a585d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=441, earliestPutTs=1732123280465 2024-11-20T17:21:23,223 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2804cd3b15024815839b63ebd0d38071, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=441, earliestPutTs=1732123280465 2024-11-20T17:21:23,224 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting e701033ab6d24f2f83b8755bcf94766e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=467, earliestPutTs=1732123280820 2024-11-20T17:21:23,224 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 492ed36b379e44ae82b029d10ef43c1c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=467, earliestPutTs=1732123280820 2024-11-20T17:21:23,246 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d436a1ae301ec26cf78d29bd05a18bd2#B#compaction#98 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:23,247 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/1871216eae244a4897d420c2259169fc is 50, key is test_row_0/B:col10/1732123281939/Put/seqid=0 2024-11-20T17:21:23,253 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d436a1ae301ec26cf78d29bd05a18bd2#A#compaction#99 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:23,254 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/4182e61fb20f41b3b91e11050fc21092 is 50, key is test_row_0/A:col10/1732123281939/Put/seqid=0 2024-11-20T17:21:23,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741937_1113 (size=13425) 2024-11-20T17:21:23,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741938_1114 (size=13425) 2024-11-20T17:21:23,261 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:23,262 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-20T17:21:23,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 
2024-11-20T17:21:23,263 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2837): Flushing d436a1ae301ec26cf78d29bd05a18bd2 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T17:21:23,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=A 2024-11-20T17:21:23,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:23,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=B 2024-11-20T17:21:23,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:23,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=C 2024-11-20T17:21:23,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:23,268 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/1871216eae244a4897d420c2259169fc as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/1871216eae244a4897d420c2259169fc 2024-11-20T17:21:23,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/1ab8d41635c6461ba6a71e2c360ddc9f is 50, key is test_row_0/A:col10/1732123281960/Put/seqid=0 2024-11-20T17:21:23,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741939_1115 (size=12301) 2024-11-20T17:21:23,277 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d436a1ae301ec26cf78d29bd05a18bd2/B of d436a1ae301ec26cf78d29bd05a18bd2 into 1871216eae244a4897d420c2259169fc(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
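Note on the compaction entries above: after each flush CompactSplit queues the store, SortedCompactionPolicy/ExploringCompactionPolicy selects the three eligible ~12-13 K HFiles, and the result is committed back as a single ~13.1 K file per store. For reference only, the same kind of compaction can be requested and observed from outside the server through the public Admin API; the sketch below is a minimal illustration (the cluster connection and polling interval are assumptions, the table name is the one in this log) and is not part of the test itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestCompaction {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();          // picks up hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      admin.compact(table);                                    // asynchronously asks the region servers to compact
      // Best-effort poll of the aggregate compaction state; a rough illustration,
      // since the state may briefly read NONE before the request is picked up.
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(1000);
      }
      System.out.println("Compaction of " + table + " finished");
    }
  }
}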
2024-11-20T17:21:23,278 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:23,278 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2., storeName=d436a1ae301ec26cf78d29bd05a18bd2/B, priority=13, startTime=1732123283220; duration=0sec 2024-11-20T17:21:23,278 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:21:23,278 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:B 2024-11-20T17:21:23,278 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:21:23,279 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:21:23,279 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): d436a1ae301ec26cf78d29bd05a18bd2/C is initiating minor compaction (all files) 2024-11-20T17:21:23,279 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d436a1ae301ec26cf78d29bd05a18bd2/C in TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:23,280 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/f904697d903244ca94a8c78b9015ec33, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/0de2b7dfc704455c9ef45d241e962888, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/babfa8d496e8441c95ae6a1c0b75397b] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp, totalSize=37.0 K 2024-11-20T17:21:23,280 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting f904697d903244ca94a8c78b9015ec33, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=426, earliestPutTs=1732123279834 2024-11-20T17:21:23,281 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 0de2b7dfc704455c9ef45d241e962888, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=441, earliestPutTs=1732123280465 2024-11-20T17:21:23,281 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting babfa8d496e8441c95ae6a1c0b75397b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=467, earliestPutTs=1732123280820 2024-11-20T17:21:23,291 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
d436a1ae301ec26cf78d29bd05a18bd2#C#compaction#101 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:23,292 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/3a659f1508f34ce2b780d902f5d962c5 is 50, key is test_row_0/C:col10/1732123281939/Put/seqid=0 2024-11-20T17:21:23,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741940_1116 (size=13391) 2024-11-20T17:21:23,313 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/3a659f1508f34ce2b780d902f5d962c5 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/3a659f1508f34ce2b780d902f5d962c5 2024-11-20T17:21:23,320 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d436a1ae301ec26cf78d29bd05a18bd2/C of d436a1ae301ec26cf78d29bd05a18bd2 into 3a659f1508f34ce2b780d902f5d962c5(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:21:23,320 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:23,321 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2., storeName=d436a1ae301ec26cf78d29bd05a18bd2/C, priority=13, startTime=1732123283220; duration=0sec 2024-11-20T17:21:23,321 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:23,321 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:C 2024-11-20T17:21:23,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T17:21:23,668 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/4182e61fb20f41b3b91e11050fc21092 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/4182e61fb20f41b3b91e11050fc21092 2024-11-20T17:21:23,673 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=480 (bloomFilter=true), 
to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/1ab8d41635c6461ba6a71e2c360ddc9f 2024-11-20T17:21:23,677 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d436a1ae301ec26cf78d29bd05a18bd2/A of d436a1ae301ec26cf78d29bd05a18bd2 into 4182e61fb20f41b3b91e11050fc21092(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:21:23,677 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:23,677 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2., storeName=d436a1ae301ec26cf78d29bd05a18bd2/A, priority=13, startTime=1732123283219; duration=0sec 2024-11-20T17:21:23,677 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:23,677 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:A 2024-11-20T17:21:23,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/02342d7b6cb14736abacf01b9856d345 is 50, key is test_row_0/B:col10/1732123281960/Put/seqid=0 2024-11-20T17:21:23,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741941_1117 (size=12301) 2024-11-20T17:21:24,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on d436a1ae301ec26cf78d29bd05a18bd2 2024-11-20T17:21:24,079 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 
as already flushing 2024-11-20T17:21:24,091 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=480 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/02342d7b6cb14736abacf01b9856d345 2024-11-20T17:21:24,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/b0626bbfe70a48a18262cf97b624ac59 is 50, key is test_row_0/C:col10/1732123281960/Put/seqid=0 2024-11-20T17:21:24,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741942_1118 (size=12301) 2024-11-20T17:21:24,112 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:24,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 278 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123344109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:24,112 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:24,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 278 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123344110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:24,214 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:24,214 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:24,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 280 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123344214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:24,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 280 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123344214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:24,417 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:24,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 282 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123344416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:24,417 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:24,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 282 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123344416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:24,508 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=480 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/b0626bbfe70a48a18262cf97b624ac59 2024-11-20T17:21:24,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/1ab8d41635c6461ba6a71e2c360ddc9f as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/1ab8d41635c6461ba6a71e2c360ddc9f 2024-11-20T17:21:24,519 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/1ab8d41635c6461ba6a71e2c360ddc9f, entries=150, sequenceid=480, filesize=12.0 K 2024-11-20T17:21:24,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/02342d7b6cb14736abacf01b9856d345 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/02342d7b6cb14736abacf01b9856d345 2024-11-20T17:21:24,524 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/02342d7b6cb14736abacf01b9856d345, entries=150, sequenceid=480, filesize=12.0 K 2024-11-20T17:21:24,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/b0626bbfe70a48a18262cf97b624ac59 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/b0626bbfe70a48a18262cf97b624ac59 2024-11-20T17:21:24,530 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/b0626bbfe70a48a18262cf97b624ac59, entries=150, sequenceid=480, filesize=12.0 K 2024-11-20T17:21:24,531 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for d436a1ae301ec26cf78d29bd05a18bd2 in 1269ms, sequenceid=480, compaction requested=false 2024-11-20T17:21:24,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2538): Flush status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:24,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:24,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-11-20T17:21:24,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=29 2024-11-20T17:21:24,534 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-11-20T17:21:24,535 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0370 sec 2024-11-20T17:21:24,536 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees in 2.0420 sec 2024-11-20T17:21:24,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T17:21:24,599 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-11-20T17:21:24,601 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:21:24,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees 2024-11-20T17:21:24,602 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:21:24,603 INFO 
[PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:21:24,603 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:21:24,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T17:21:24,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T17:21:24,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on d436a1ae301ec26cf78d29bd05a18bd2 2024-11-20T17:21:24,721 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d436a1ae301ec26cf78d29bd05a18bd2 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T17:21:24,722 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=A 2024-11-20T17:21:24,722 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:24,722 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=B 2024-11-20T17:21:24,722 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:24,722 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=C 2024-11-20T17:21:24,722 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:24,730 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/97975bdb221f47d7b280aabaddaf2e0b is 50, key is test_row_0/A:col10/1732123284720/Put/seqid=0 2024-11-20T17:21:24,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741943_1119 (size=12301) 2024-11-20T17:21:24,743 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:24,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 289 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123344742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:24,744 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:24,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 288 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123344743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:24,755 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:24,756 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T17:21:24,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 
2024-11-20T17:21:24,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. as already flushing 2024-11-20T17:21:24,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:24,756 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:24,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
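The pid=30/pid=31 sequence above is a client-requested table flush colliding with a flush that MemStoreFlusher already has in progress: FlushRegionCallable reports "Unable to complete flush ... as already flushing", and the master re-dispatches the sub-procedure (pid=31 is executed again at 17:21:24,909 and 17:21:25,063) until the region is free. For reference, a flush request of this kind is issued through the Admin API roughly as in the sketch below (a minimal illustration assuming a standard client Connection; only the table name is taken from this log, nothing else is the test's own code).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestFlush {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // The caller blocks until the flush procedure completes, which is consistent
      // with the HBaseAdmin$TableFuture "procId: 28 completed" entry in this log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}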
2024-11-20T17:21:24,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:24,845 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:24,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 291 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123344844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:24,846 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:24,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 290 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123344845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:24,909 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:24,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T17:21:24,909 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T17:21:24,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:24,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. as already flushing 2024-11-20T17:21:24,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:24,910 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:21:24,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:24,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:25,047 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:25,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 293 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123345047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:25,049 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:25,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 292 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123345048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:25,062 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:25,063 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T17:21:25,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:25,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. as already flushing 2024-11-20T17:21:25,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:25,063 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:21:25,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:25,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:25,142 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=508 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/97975bdb221f47d7b280aabaddaf2e0b 2024-11-20T17:21:25,150 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/e8634455f57f472a9829010bbdadd848 is 50, key is test_row_0/B:col10/1732123284720/Put/seqid=0 2024-11-20T17:21:25,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741944_1120 (size=12301) 2024-11-20T17:21:25,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T17:21:25,215 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:25,215 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T17:21:25,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:25,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. as already flushing 2024-11-20T17:21:25,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:25,216 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:25,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:25,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:25,352 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:25,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 295 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123345351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:25,353 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:25,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 294 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123345352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:25,368 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:25,369 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T17:21:25,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:25,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. as already flushing 2024-11-20T17:21:25,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:25,369 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:21:25,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:25,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:25,522 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:25,523 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T17:21:25,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:25,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. as already flushing 2024-11-20T17:21:25,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:25,523 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:25,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:25,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:21:25,558 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=508 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/e8634455f57f472a9829010bbdadd848 2024-11-20T17:21:25,568 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/8a89126b9e60495b9bab6722c40ef07f is 50, key is test_row_0/C:col10/1732123284720/Put/seqid=0 2024-11-20T17:21:25,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741945_1121 (size=12301) 2024-11-20T17:21:25,676 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:25,676 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T17:21:25,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:25,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. as already flushing 2024-11-20T17:21:25,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:25,677 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:21:25,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:25,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:25,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T17:21:25,829 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:25,830 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T17:21:25,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:25,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. as already flushing 2024-11-20T17:21:25,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:25,830 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:25,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:25,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:25,858 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:25,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 296 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123345857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:25,858 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:25,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 297 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123345858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:25,973 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=508 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/8a89126b9e60495b9bab6722c40ef07f 2024-11-20T17:21:25,980 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/97975bdb221f47d7b280aabaddaf2e0b as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/97975bdb221f47d7b280aabaddaf2e0b 2024-11-20T17:21:25,983 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:25,983 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T17:21:25,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:25,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. as already flushing 2024-11-20T17:21:25,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:25,984 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:25,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:25,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:25,986 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/97975bdb221f47d7b280aabaddaf2e0b, entries=150, sequenceid=508, filesize=12.0 K 2024-11-20T17:21:25,988 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/e8634455f57f472a9829010bbdadd848 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/e8634455f57f472a9829010bbdadd848 2024-11-20T17:21:25,994 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/e8634455f57f472a9829010bbdadd848, entries=150, sequenceid=508, filesize=12.0 K 2024-11-20T17:21:25,995 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/8a89126b9e60495b9bab6722c40ef07f as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/8a89126b9e60495b9bab6722c40ef07f 2024-11-20T17:21:26,001 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/8a89126b9e60495b9bab6722c40ef07f, entries=150, sequenceid=508, filesize=12.0 K 
2024-11-20T17:21:26,002 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for d436a1ae301ec26cf78d29bd05a18bd2 in 1281ms, sequenceid=508, compaction requested=true 2024-11-20T17:21:26,002 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:26,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:21:26,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:26,003 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:21:26,003 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:21:26,003 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:21:26,003 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:26,003 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:21:26,003 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:21:26,004 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38027 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:21:26,004 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): d436a1ae301ec26cf78d29bd05a18bd2/A is initiating minor compaction (all files) 2024-11-20T17:21:26,004 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d436a1ae301ec26cf78d29bd05a18bd2/A in TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 
2024-11-20T17:21:26,005 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/4182e61fb20f41b3b91e11050fc21092, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/1ab8d41635c6461ba6a71e2c360ddc9f, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/97975bdb221f47d7b280aabaddaf2e0b] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp, totalSize=37.1 K 2024-11-20T17:21:26,005 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38027 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:21:26,005 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): d436a1ae301ec26cf78d29bd05a18bd2/B is initiating minor compaction (all files) 2024-11-20T17:21:26,005 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4182e61fb20f41b3b91e11050fc21092, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=467, earliestPutTs=1732123280820 2024-11-20T17:21:26,005 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d436a1ae301ec26cf78d29bd05a18bd2/B in TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 
2024-11-20T17:21:26,005 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/1871216eae244a4897d420c2259169fc, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/02342d7b6cb14736abacf01b9856d345, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/e8634455f57f472a9829010bbdadd848] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp, totalSize=37.1 K 2024-11-20T17:21:26,006 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 1871216eae244a4897d420c2259169fc, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=467, earliestPutTs=1732123280820 2024-11-20T17:21:26,006 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 02342d7b6cb14736abacf01b9856d345, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=480, earliestPutTs=1732123281954 2024-11-20T17:21:26,007 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting e8634455f57f472a9829010bbdadd848, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=508, earliestPutTs=1732123284103 2024-11-20T17:21:26,008 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1ab8d41635c6461ba6a71e2c360ddc9f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=480, earliestPutTs=1732123281954 2024-11-20T17:21:26,008 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 97975bdb221f47d7b280aabaddaf2e0b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=508, earliestPutTs=1732123284103 2024-11-20T17:21:26,018 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d436a1ae301ec26cf78d29bd05a18bd2#B#compaction#107 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:26,019 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/ed4f80081bea407ab7fc3da0454083aa is 50, key is test_row_0/B:col10/1732123284720/Put/seqid=0 2024-11-20T17:21:26,033 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d436a1ae301ec26cf78d29bd05a18bd2#A#compaction#108 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:26,034 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/41bba098689349e0b17c1084def688d5 is 50, key is test_row_0/A:col10/1732123284720/Put/seqid=0 2024-11-20T17:21:26,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741946_1122 (size=13527) 2024-11-20T17:21:26,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741947_1123 (size=13527) 2024-11-20T17:21:26,137 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:26,137 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T17:21:26,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:26,138 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2837): Flushing d436a1ae301ec26cf78d29bd05a18bd2 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T17:21:26,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=A 2024-11-20T17:21:26,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:26,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=B 2024-11-20T17:21:26,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:26,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=C 2024-11-20T17:21:26,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:26,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/1c0c8e6435de4ed698839bd75129dcca is 50, key is test_row_0/A:col10/1732123284737/Put/seqid=0 2024-11-20T17:21:26,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741948_1124 
(size=12301) 2024-11-20T17:21:26,162 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=519 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/1c0c8e6435de4ed698839bd75129dcca 2024-11-20T17:21:26,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/dd57aa1288c343928f9e6b3b6556c84d is 50, key is test_row_0/B:col10/1732123284737/Put/seqid=0 2024-11-20T17:21:26,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741949_1125 (size=12301) 2024-11-20T17:21:26,187 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=519 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/dd57aa1288c343928f9e6b3b6556c84d 2024-11-20T17:21:26,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/c75bdb4ab61949238d22a0971a5132db is 50, key is test_row_0/C:col10/1732123284737/Put/seqid=0 2024-11-20T17:21:26,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741950_1126 (size=12301) 2024-11-20T17:21:26,446 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/ed4f80081bea407ab7fc3da0454083aa as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/ed4f80081bea407ab7fc3da0454083aa 2024-11-20T17:21:26,453 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/41bba098689349e0b17c1084def688d5 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/41bba098689349e0b17c1084def688d5 2024-11-20T17:21:26,453 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d436a1ae301ec26cf78d29bd05a18bd2/B of d436a1ae301ec26cf78d29bd05a18bd2 into ed4f80081bea407ab7fc3da0454083aa(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:21:26,454 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:26,454 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2., storeName=d436a1ae301ec26cf78d29bd05a18bd2/B, priority=13, startTime=1732123286003; duration=0sec 2024-11-20T17:21:26,454 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:21:26,454 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:B 2024-11-20T17:21:26,454 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:21:26,455 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37993 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:21:26,455 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): d436a1ae301ec26cf78d29bd05a18bd2/C is initiating minor compaction (all files) 2024-11-20T17:21:26,455 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d436a1ae301ec26cf78d29bd05a18bd2/C in TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:26,456 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/3a659f1508f34ce2b780d902f5d962c5, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/b0626bbfe70a48a18262cf97b624ac59, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/8a89126b9e60495b9bab6722c40ef07f] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp, totalSize=37.1 K 2024-11-20T17:21:26,457 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 3a659f1508f34ce2b780d902f5d962c5, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=467, earliestPutTs=1732123280820 2024-11-20T17:21:26,458 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting b0626bbfe70a48a18262cf97b624ac59, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=480, earliestPutTs=1732123281954 2024-11-20T17:21:26,458 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 8a89126b9e60495b9bab6722c40ef07f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=508, earliestPutTs=1732123284103 2024-11-20T17:21:26,461 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 
(all) file(s) in d436a1ae301ec26cf78d29bd05a18bd2/A of d436a1ae301ec26cf78d29bd05a18bd2 into 41bba098689349e0b17c1084def688d5(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:21:26,461 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:26,461 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2., storeName=d436a1ae301ec26cf78d29bd05a18bd2/A, priority=13, startTime=1732123286002; duration=0sec 2024-11-20T17:21:26,461 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:26,461 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:A 2024-11-20T17:21:26,468 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d436a1ae301ec26cf78d29bd05a18bd2#C#compaction#112 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:26,468 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/199d8ed7b5894d67b99f4074beac9070 is 50, key is test_row_0/C:col10/1732123284720/Put/seqid=0 2024-11-20T17:21:26,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741951_1127 (size=13493) 2024-11-20T17:21:26,478 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/199d8ed7b5894d67b99f4074beac9070 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/199d8ed7b5894d67b99f4074beac9070 2024-11-20T17:21:26,485 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d436a1ae301ec26cf78d29bd05a18bd2/C of d436a1ae301ec26cf78d29bd05a18bd2 into 199d8ed7b5894d67b99f4074beac9070(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:21:26,485 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:26,485 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2., storeName=d436a1ae301ec26cf78d29bd05a18bd2/C, priority=13, startTime=1732123286003; duration=0sec 2024-11-20T17:21:26,485 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:26,485 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:C 2024-11-20T17:21:26,602 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=519 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/c75bdb4ab61949238d22a0971a5132db 2024-11-20T17:21:26,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/1c0c8e6435de4ed698839bd75129dcca as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/1c0c8e6435de4ed698839bd75129dcca 2024-11-20T17:21:26,617 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/1c0c8e6435de4ed698839bd75129dcca, entries=150, sequenceid=519, filesize=12.0 K 2024-11-20T17:21:26,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/dd57aa1288c343928f9e6b3b6556c84d as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/dd57aa1288c343928f9e6b3b6556c84d 2024-11-20T17:21:26,624 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/dd57aa1288c343928f9e6b3b6556c84d, entries=150, sequenceid=519, filesize=12.0 K 2024-11-20T17:21:26,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/c75bdb4ab61949238d22a0971a5132db as 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/c75bdb4ab61949238d22a0971a5132db 2024-11-20T17:21:26,631 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/c75bdb4ab61949238d22a0971a5132db, entries=150, sequenceid=519, filesize=12.0 K 2024-11-20T17:21:26,632 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=0 B/0 for d436a1ae301ec26cf78d29bd05a18bd2 in 494ms, sequenceid=519, compaction requested=false 2024-11-20T17:21:26,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2538): Flush status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:26,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:26,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=31 2024-11-20T17:21:26,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=31 2024-11-20T17:21:26,635 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-11-20T17:21:26,635 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0300 sec 2024-11-20T17:21:26,636 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees in 2.0340 sec 2024-11-20T17:21:26,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T17:21:26,712 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-11-20T17:21:26,713 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:21:26,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees 2024-11-20T17:21:26,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-20T17:21:26,715 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:21:26,716 INFO [PEWorker-1 {}] 
procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:21:26,716 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:21:26,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-20T17:21:26,868 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:26,868 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-20T17:21:26,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:26,869 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2837): Flushing d436a1ae301ec26cf78d29bd05a18bd2 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-20T17:21:26,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=A 2024-11-20T17:21:26,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:26,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=B 2024-11-20T17:21:26,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:26,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=C 2024-11-20T17:21:26,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:26,871 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 
as already flushing 2024-11-20T17:21:26,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on d436a1ae301ec26cf78d29bd05a18bd2 2024-11-20T17:21:26,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/3abbdc6b4bdd4a799ba8f74975ca16c7 is 50, key is test_row_0/A:col10/1732123286866/Put/seqid=0 2024-11-20T17:21:26,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741952_1128 (size=12297) 2024-11-20T17:21:26,902 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:26,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 314 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123346901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:26,903 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:26,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 313 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123346902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:27,005 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:27,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 316 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123347004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:27,005 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:27,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 315 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123347004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:27,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-20T17:21:27,208 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:27,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 318 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123347207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:27,210 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:27,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 317 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123347208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:27,298 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=531 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/3abbdc6b4bdd4a799ba8f74975ca16c7 2024-11-20T17:21:27,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/7409bce27091450fa4faf57149cd7eff is 50, key is test_row_0/B:col10/1732123286866/Put/seqid=0 2024-11-20T17:21:27,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741953_1129 (size=9857) 2024-11-20T17:21:27,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-20T17:21:27,513 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:27,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 320 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123347512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:27,515 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:27,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 319 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123347513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:27,712 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=531 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/7409bce27091450fa4faf57149cd7eff 2024-11-20T17:21:27,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/7093bafb3e62455e9d77c640a7106669 is 50, key is test_row_0/C:col10/1732123286866/Put/seqid=0 2024-11-20T17:21:27,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741954_1130 (size=9857) 2024-11-20T17:21:27,728 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=531 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/7093bafb3e62455e9d77c640a7106669 2024-11-20T17:21:27,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/3abbdc6b4bdd4a799ba8f74975ca16c7 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/3abbdc6b4bdd4a799ba8f74975ca16c7 2024-11-20T17:21:27,739 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/3abbdc6b4bdd4a799ba8f74975ca16c7, entries=150, sequenceid=531, filesize=12.0 K 2024-11-20T17:21:27,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/7409bce27091450fa4faf57149cd7eff as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/7409bce27091450fa4faf57149cd7eff 2024-11-20T17:21:27,745 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/7409bce27091450fa4faf57149cd7eff, entries=100, sequenceid=531, filesize=9.6 K 2024-11-20T17:21:27,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/7093bafb3e62455e9d77c640a7106669 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/7093bafb3e62455e9d77c640a7106669 2024-11-20T17:21:27,752 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/7093bafb3e62455e9d77c640a7106669, entries=100, sequenceid=531, filesize=9.6 K 2024-11-20T17:21:27,753 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for d436a1ae301ec26cf78d29bd05a18bd2 in 884ms, sequenceid=531, compaction requested=true 2024-11-20T17:21:27,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2538): Flush status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:27,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 
2024-11-20T17:21:27,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=33 2024-11-20T17:21:27,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=33 2024-11-20T17:21:27,756 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-11-20T17:21:27,756 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0380 sec 2024-11-20T17:21:27,758 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees in 1.0440 sec 2024-11-20T17:21:27,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-20T17:21:27,818 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 32 completed 2024-11-20T17:21:27,819 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:21:27,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=34, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees 2024-11-20T17:21:27,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-20T17:21:27,821 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=34, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:21:27,822 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=34, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:21:27,822 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:21:27,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-20T17:21:27,974 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:27,975 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-11-20T17:21:27,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 
2024-11-20T17:21:27,975 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2837): Flushing d436a1ae301ec26cf78d29bd05a18bd2 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-20T17:21:27,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=A 2024-11-20T17:21:27,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:27,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=B 2024-11-20T17:21:27,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:27,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=C 2024-11-20T17:21:27,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:27,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/ff0733c2ba794e0b84dd1a60891457ac is 50, key is test_row_0/A:col10/1732123286900/Put/seqid=0 2024-11-20T17:21:27,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741955_1131 (size=12301) 2024-11-20T17:21:28,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on d436a1ae301ec26cf78d29bd05a18bd2 2024-11-20T17:21:28,020 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. as already flushing 2024-11-20T17:21:28,026 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:28,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 324 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123348024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:28,026 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:28,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 325 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123348026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:28,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-20T17:21:28,128 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:28,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 326 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123348127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:28,129 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:28,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 327 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123348128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:28,332 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:28,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 328 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123348331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:28,332 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:28,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 329 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123348331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:28,387 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=558 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/ff0733c2ba794e0b84dd1a60891457ac 2024-11-20T17:21:28,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/69a7419a6f4743618c3347e291313081 is 50, key is test_row_0/B:col10/1732123286900/Put/seqid=0 2024-11-20T17:21:28,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741956_1132 (size=12301) 2024-11-20T17:21:28,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-20T17:21:28,634 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:28,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 330 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123348633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:28,636 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:28,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 331 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123348635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:28,803 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=558 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/69a7419a6f4743618c3347e291313081 2024-11-20T17:21:28,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/c52d5cd29657429b8b3df3f4f4b2754e is 50, key is test_row_0/C:col10/1732123286900/Put/seqid=0 2024-11-20T17:21:28,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741957_1133 (size=12301) 2024-11-20T17:21:28,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-20T17:21:29,070 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6f6b07e3 to 127.0.0.1:55266 2024-11-20T17:21:29,070 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x490457fd to 127.0.0.1:55266 2024-11-20T17:21:29,070 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:21:29,070 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:21:29,071 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72e97e4b to 127.0.0.1:55266 2024-11-20T17:21:29,071 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:21:29,073 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c8de680 to 127.0.0.1:55266 2024-11-20T17:21:29,073 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:21:29,140 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:29,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 332 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47556 deadline: 1732123349139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:29,140 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:29,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 333 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47516 deadline: 1732123349140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:29,217 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=558 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/c52d5cd29657429b8b3df3f4f4b2754e 2024-11-20T17:21:29,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/ff0733c2ba794e0b84dd1a60891457ac as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/ff0733c2ba794e0b84dd1a60891457ac 2024-11-20T17:21:29,227 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/ff0733c2ba794e0b84dd1a60891457ac, entries=150, sequenceid=558, filesize=12.0 K 2024-11-20T17:21:29,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/69a7419a6f4743618c3347e291313081 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/69a7419a6f4743618c3347e291313081 2024-11-20T17:21:29,232 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/69a7419a6f4743618c3347e291313081, entries=150, sequenceid=558, filesize=12.0 K 2024-11-20T17:21:29,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/c52d5cd29657429b8b3df3f4f4b2754e as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/c52d5cd29657429b8b3df3f4f4b2754e 2024-11-20T17:21:29,237 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/c52d5cd29657429b8b3df3f4f4b2754e, entries=150, sequenceid=558, filesize=12.0 K 2024-11-20T17:21:29,238 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for d436a1ae301ec26cf78d29bd05a18bd2 in 1263ms, sequenceid=558, compaction requested=true 2024-11-20T17:21:29,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2538): Flush status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:29,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:29,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=35 2024-11-20T17:21:29,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=35 2024-11-20T17:21:29,240 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=35, resume processing ppid=34 2024-11-20T17:21:29,240 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, ppid=34, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4170 sec 2024-11-20T17:21:29,242 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees in 1.4220 sec 2024-11-20T17:21:29,395 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x12885408 to 127.0.0.1:55266 2024-11-20T17:21:29,395 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:21:29,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on d436a1ae301ec26cf78d29bd05a18bd2 2024-11-20T17:21:29,442 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d436a1ae301ec26cf78d29bd05a18bd2 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T17:21:29,442 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x04977266 to 127.0.0.1:55266 2024-11-20T17:21:29,442 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:21:29,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=A 2024-11-20T17:21:29,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline 
suffix; before=1, new segment=null 2024-11-20T17:21:29,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=B 2024-11-20T17:21:29,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:29,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=C 2024-11-20T17:21:29,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:29,446 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/4f33de9edc4a47c3a53493a64f16b164 is 50, key is test_row_0/A:col10/1732123289440/Put/seqid=0 2024-11-20T17:21:29,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741958_1134 (size=9857) 2024-11-20T17:21:29,453 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6bbb5d8a to 127.0.0.1:55266 2024-11-20T17:21:29,453 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:21:29,851 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=569 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/4f33de9edc4a47c3a53493a64f16b164 2024-11-20T17:21:29,859 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/bfab5e9e11f94201bf4b45c201a5e5b1 is 50, key is test_row_0/B:col10/1732123289440/Put/seqid=0 2024-11-20T17:21:29,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741959_1135 (size=9857) 2024-11-20T17:21:29,863 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=569 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/bfab5e9e11f94201bf4b45c201a5e5b1 2024-11-20T17:21:29,872 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/aefd5af5673a418d9266194ff822e57f is 50, key is test_row_0/C:col10/1732123289440/Put/seqid=0 2024-11-20T17:21:29,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741960_1136 (size=9857) 2024-11-20T17:21:29,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-20T17:21:29,925 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 34 completed 2024-11-20T17:21:30,147 DEBUG [Thread-151 {}] 
zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72b32f98 to 127.0.0.1:55266 2024-11-20T17:21:30,147 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:21:30,149 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x18603bb9 to 127.0.0.1:55266 2024-11-20T17:21:30,150 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:21:30,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-20T17:21:30,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 24 2024-11-20T17:21:30,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 189 2024-11-20T17:21:30,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 24 2024-11-20T17:21:30,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 24 2024-11-20T17:21:30,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 188 2024-11-20T17:21:30,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T17:21:30,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6766 2024-11-20T17:21:30,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6752 2024-11-20T17:21:30,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T17:21:30,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2934 2024-11-20T17:21:30,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8798 rows 2024-11-20T17:21:30,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2964 2024-11-20T17:21:30,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8882 rows 2024-11-20T17:21:30,150 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T17:21:30,150 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0e98ea32 to 127.0.0.1:55266 2024-11-20T17:21:30,150 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:21:30,156 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-20T17:21:30,159 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T17:21:30,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=36, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T17:21:30,166 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123290166"}]},"ts":"1732123290166"} 2024-11-20T17:21:30,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-20T17:21:30,167 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T17:21:30,170 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T17:21:30,172 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; 
CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T17:21:30,176 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d436a1ae301ec26cf78d29bd05a18bd2, UNASSIGN}] 2024-11-20T17:21:30,176 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d436a1ae301ec26cf78d29bd05a18bd2, UNASSIGN 2024-11-20T17:21:30,177 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=d436a1ae301ec26cf78d29bd05a18bd2, regionState=CLOSING, regionLocation=d514dc944523,40121,1732123262111 2024-11-20T17:21:30,178 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T17:21:30,179 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE; CloseRegionProcedure d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111}] 2024-11-20T17:21:30,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-20T17:21:30,282 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=569 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/aefd5af5673a418d9266194ff822e57f 2024-11-20T17:21:30,287 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/4f33de9edc4a47c3a53493a64f16b164 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/4f33de9edc4a47c3a53493a64f16b164 2024-11-20T17:21:30,291 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/4f33de9edc4a47c3a53493a64f16b164, entries=100, sequenceid=569, filesize=9.6 K 2024-11-20T17:21:30,292 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/bfab5e9e11f94201bf4b45c201a5e5b1 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/bfab5e9e11f94201bf4b45c201a5e5b1 2024-11-20T17:21:30,296 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/bfab5e9e11f94201bf4b45c201a5e5b1, entries=100, sequenceid=569, filesize=9.6 K 2024-11-20T17:21:30,296 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/aefd5af5673a418d9266194ff822e57f as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/aefd5af5673a418d9266194ff822e57f 2024-11-20T17:21:30,300 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/aefd5af5673a418d9266194ff822e57f, entries=100, sequenceid=569, filesize=9.6 K 2024-11-20T17:21:30,301 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=20.13 KB/20610 for d436a1ae301ec26cf78d29bd05a18bd2 in 860ms, sequenceid=569, compaction requested=true 2024-11-20T17:21:30,301 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:30,301 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:21:30,301 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:30,301 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:21:30,301 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:30,301 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-20T17:21:30,301 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d436a1ae301ec26cf78d29bd05a18bd2:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:21:30,301 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:21:30,301 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-20T17:21:30,303 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 57843 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-20T17:21:30,303 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): d436a1ae301ec26cf78d29bd05a18bd2/B is initiating minor compaction (all files) 2024-11-20T17:21:30,303 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 60283 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-20T17:21:30,303 INFO 
[RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d436a1ae301ec26cf78d29bd05a18bd2/B in TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:30,303 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): d436a1ae301ec26cf78d29bd05a18bd2/A is initiating minor compaction (all files) 2024-11-20T17:21:30,303 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d436a1ae301ec26cf78d29bd05a18bd2/A in TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:30,303 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/ed4f80081bea407ab7fc3da0454083aa, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/dd57aa1288c343928f9e6b3b6556c84d, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/7409bce27091450fa4faf57149cd7eff, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/69a7419a6f4743618c3347e291313081, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/bfab5e9e11f94201bf4b45c201a5e5b1] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp, totalSize=56.5 K 2024-11-20T17:21:30,303 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/41bba098689349e0b17c1084def688d5, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/1c0c8e6435de4ed698839bd75129dcca, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/3abbdc6b4bdd4a799ba8f74975ca16c7, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/ff0733c2ba794e0b84dd1a60891457ac, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/4f33de9edc4a47c3a53493a64f16b164] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp, totalSize=58.9 K 2024-11-20T17:21:30,303 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting ed4f80081bea407ab7fc3da0454083aa, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=508, earliestPutTs=1732123284103 2024-11-20T17:21:30,304 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 41bba098689349e0b17c1084def688d5, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, 
compression=NONE, seqNum=508, earliestPutTs=1732123284103 2024-11-20T17:21:30,304 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting dd57aa1288c343928f9e6b3b6556c84d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=519, earliestPutTs=1732123284722 2024-11-20T17:21:30,304 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1c0c8e6435de4ed698839bd75129dcca, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=519, earliestPutTs=1732123284722 2024-11-20T17:21:30,304 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 7409bce27091450fa4faf57149cd7eff, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=531, earliestPutTs=1732123286866 2024-11-20T17:21:30,304 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3abbdc6b4bdd4a799ba8f74975ca16c7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=531, earliestPutTs=1732123286866 2024-11-20T17:21:30,305 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 69a7419a6f4743618c3347e291313081, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=558, earliestPutTs=1732123286895 2024-11-20T17:21:30,305 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting ff0733c2ba794e0b84dd1a60891457ac, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=558, earliestPutTs=1732123286895 2024-11-20T17:21:30,305 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting bfab5e9e11f94201bf4b45c201a5e5b1, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=569, earliestPutTs=1732123288023 2024-11-20T17:21:30,305 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4f33de9edc4a47c3a53493a64f16b164, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=569, earliestPutTs=1732123288023 2024-11-20T17:21:30,317 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d436a1ae301ec26cf78d29bd05a18bd2#B#compaction#122 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:30,318 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/d742c932d0ca44ce99130071fb2473ed is 50, key is test_row_0/B:col10/1732123289440/Put/seqid=0 2024-11-20T17:21:30,321 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d436a1ae301ec26cf78d29bd05a18bd2#A#compaction#123 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:30,321 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/ea60015ea0c34d36862f6ce53069c4af is 50, key is test_row_0/A:col10/1732123289440/Put/seqid=0 2024-11-20T17:21:30,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741961_1137 (size=13697) 2024-11-20T17:21:30,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741962_1138 (size=13697) 2024-11-20T17:21:30,333 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:30,335 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] handler.UnassignRegionHandler(124): Close d436a1ae301ec26cf78d29bd05a18bd2 2024-11-20T17:21:30,335 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T17:21:30,336 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1681): Closing d436a1ae301ec26cf78d29bd05a18bd2, disabling compactions & flushes 2024-11-20T17:21:30,336 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1942): waiting for 2 compactions to complete for region TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:30,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-20T17:21:30,490 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T17:21:30,729 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/d742c932d0ca44ce99130071fb2473ed as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/d742c932d0ca44ce99130071fb2473ed 2024-11-20T17:21:30,730 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/ea60015ea0c34d36862f6ce53069c4af as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/ea60015ea0c34d36862f6ce53069c4af 2024-11-20T17:21:30,734 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in d436a1ae301ec26cf78d29bd05a18bd2/B of d436a1ae301ec26cf78d29bd05a18bd2 into d742c932d0ca44ce99130071fb2473ed(size=13.4 K), total size for store is 13.4 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:21:30,734 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:30,734 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in d436a1ae301ec26cf78d29bd05a18bd2/A of d436a1ae301ec26cf78d29bd05a18bd2 into ea60015ea0c34d36862f6ce53069c4af(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:21:30,735 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2., storeName=d436a1ae301ec26cf78d29bd05a18bd2/B, priority=11, startTime=1732123290301; duration=0sec 2024-11-20T17:21:30,735 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d436a1ae301ec26cf78d29bd05a18bd2: 2024-11-20T17:21:30,735 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:21:30,735 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:30,735 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:B 2024-11-20T17:21:30,735 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2., storeName=d436a1ae301ec26cf78d29bd05a18bd2/A, priority=11, startTime=1732123290301; duration=0sec 2024-11-20T17:21:30,735 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:30,735 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. because compaction request was cancelled 2024-11-20T17:21:30,735 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:C 2024-11-20T17:21:30,735 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 
after waiting 0 ms 2024-11-20T17:21:30,735 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:30,735 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d436a1ae301ec26cf78d29bd05a18bd2:A 2024-11-20T17:21:30,735 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2. 2024-11-20T17:21:30,735 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(2837): Flushing d436a1ae301ec26cf78d29bd05a18bd2 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-11-20T17:21:30,735 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=A 2024-11-20T17:21:30,735 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:30,735 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=B 2024-11-20T17:21:30,735 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:30,735 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d436a1ae301ec26cf78d29bd05a18bd2, store=C 2024-11-20T17:21:30,735 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:30,739 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/3f818f8952104dcc8cd19ea3f6346c62 is 50, key is test_row_0/A:col10/1732123290148/Put/seqid=0 2024-11-20T17:21:30,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741963_1139 (size=9857) 2024-11-20T17:21:30,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-20T17:21:31,144 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=577 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/3f818f8952104dcc8cd19ea3f6346c62 2024-11-20T17:21:31,151 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/6cecd4caaea043edbc4a4039745f6369 is 50, key is test_row_0/B:col10/1732123290148/Put/seqid=0 2024-11-20T17:21:31,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741964_1140 (size=9857) 2024-11-20T17:21:31,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-20T17:21:31,556 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=577 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/6cecd4caaea043edbc4a4039745f6369 2024-11-20T17:21:31,563 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/15b15524d0ff43d8b8efec9489758b1f is 50, key is test_row_0/C:col10/1732123290148/Put/seqid=0 2024-11-20T17:21:31,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741965_1141 (size=9857) 2024-11-20T17:21:31,968 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=577 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/15b15524d0ff43d8b8efec9489758b1f 2024-11-20T17:21:31,973 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/A/3f818f8952104dcc8cd19ea3f6346c62 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/3f818f8952104dcc8cd19ea3f6346c62 2024-11-20T17:21:31,977 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/3f818f8952104dcc8cd19ea3f6346c62, entries=100, sequenceid=577, filesize=9.6 K 2024-11-20T17:21:31,978 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/B/6cecd4caaea043edbc4a4039745f6369 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/6cecd4caaea043edbc4a4039745f6369 2024-11-20T17:21:31,982 INFO 
[RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/6cecd4caaea043edbc4a4039745f6369, entries=100, sequenceid=577, filesize=9.6 K 2024-11-20T17:21:31,982 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/.tmp/C/15b15524d0ff43d8b8efec9489758b1f as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/15b15524d0ff43d8b8efec9489758b1f 2024-11-20T17:21:31,986 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/15b15524d0ff43d8b8efec9489758b1f, entries=100, sequenceid=577, filesize=9.6 K 2024-11-20T17:21:31,987 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for d436a1ae301ec26cf78d29bd05a18bd2 in 1252ms, sequenceid=577, compaction requested=true 2024-11-20T17:21:31,987 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/0a9da142b993446c884a0e14f5327b5b, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/88ec75f0fe07425bbc883f98f04b0d82, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/a799a4d87fd8445a81fe48a9f7d24b59, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/fb271bb0e5fb47d8bec53a33ae42b947, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/247267beb1e44fcd99b9c3161a9a5ec5, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/02ce090b4cf84c79876b8658cadcd446, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/44e69a6c5d834051a45ff6ab36594997, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/3be21fc0281c4d85ae32ca41ae61eb7e, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/c19359efc8ac44d9be70d9e5671ce69c, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/60eb3fff24f945e18bb06242cc949678, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/82401d6cfa4b47948d317c76c28a6e71, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/094edeb7be964ef3a128c78b86a9299d, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/8506204ed40840af867b5bfaac9ff469, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/cd499cf7922d4b828941dedf78adad9a, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/3f66119f3b1d4296bb0580926a6cde9a, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/5432d4ce26a14b92ae3b5875b3e7061c, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/3319e269e5624cef8f6035d1b144021d, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/007108958e064f0087ed6dad51783e63, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/fbc86b289f3145419972b888b135353b, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/29022e865fe044c0b5adc47050721f51, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/83b55334c3654a02a04cfa676662e5a3, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/742de9f588ef4ec595f02fa68b809209, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/e2d52ea2226141e59c9bd3e0432357bb, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/e3277cac91dc4a14b66d9926404e0954, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/6483edd9be424880bace3c4c612dff83, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/07578e696ef546cfb8fd81d6ad72d5f2, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/a423e8be77b744b985544a3808f085f6, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/9bd615511bda4a268c0371a7698cfd55, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/1456a3a49ac34efea63ca64ed5f91c59, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/f678be0fa2514abfa68a6135ff608982, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/a876dd9fe3af40c080008bb1fb32ac99, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/2804cd3b15024815839b63ebd0d38071, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/4182e61fb20f41b3b91e11050fc21092, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/e701033ab6d24f2f83b8755bcf94766e, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/1ab8d41635c6461ba6a71e2c360ddc9f, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/41bba098689349e0b17c1084def688d5, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/97975bdb221f47d7b280aabaddaf2e0b, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/1c0c8e6435de4ed698839bd75129dcca, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/3abbdc6b4bdd4a799ba8f74975ca16c7, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/ff0733c2ba794e0b84dd1a60891457ac, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/4f33de9edc4a47c3a53493a64f16b164] to archive 2024-11-20T17:21:31,991 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
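Note on the archiving entries above and below: when the store closer moves compacted store files "to archive", each HDFS path is remapped from the region's data directory to a mirrored location under archive/ in the same cluster root, keeping the table, region, and column-family components unchanged. The following standalone Java sketch is illustrative only (it is not HBase's HFileArchiver code; the class and method names are made up for this note) and simply reproduces that path mapping for the first file named in the log:

// Illustrative sketch, not HBase's HFileArchiver: derive the archive location
// shown in the HFileArchiver(596) entries by inserting "archive/" in front of
// the "/data/default/" component of a store-file path.
public final class ArchivePathSketch {
    static String toArchivePath(String storeFilePath) {
        int idx = storeFilePath.indexOf("/data/default/");
        if (idx < 0) {
            throw new IllegalArgumentException("unexpected store file path: " + storeFilePath);
        }
        // .../<cluster-root>/data/default/<table>/<region>/<family>/<hfile>
        //  -> .../<cluster-root>/archive/data/default/<table>/<region>/<family>/<hfile>
        return storeFilePath.substring(0, idx) + "/archive" + storeFilePath.substring(idx);
    }

    public static void main(String[] args) {
        System.out.println(toArchivePath(
            "hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0"
                + "/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/0a9da142b993446c884a0e14f5327b5b"));
    }
}

Running the sketch prints the same archive URI that appears in the first HFileArchiver(596) entry below for file 0a9da142b993446c884a0e14f5327b5b.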
2024-11-20T17:21:31,996 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/0a9da142b993446c884a0e14f5327b5b to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/0a9da142b993446c884a0e14f5327b5b 2024-11-20T17:21:31,997 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/88ec75f0fe07425bbc883f98f04b0d82 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/88ec75f0fe07425bbc883f98f04b0d82 2024-11-20T17:21:31,998 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/a799a4d87fd8445a81fe48a9f7d24b59 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/a799a4d87fd8445a81fe48a9f7d24b59 2024-11-20T17:21:32,000 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/fb271bb0e5fb47d8bec53a33ae42b947 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/fb271bb0e5fb47d8bec53a33ae42b947 2024-11-20T17:21:32,001 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/247267beb1e44fcd99b9c3161a9a5ec5 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/247267beb1e44fcd99b9c3161a9a5ec5 2024-11-20T17:21:32,002 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/02ce090b4cf84c79876b8658cadcd446 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/02ce090b4cf84c79876b8658cadcd446 2024-11-20T17:21:32,003 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/44e69a6c5d834051a45ff6ab36594997 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/44e69a6c5d834051a45ff6ab36594997 2024-11-20T17:21:32,004 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/3be21fc0281c4d85ae32ca41ae61eb7e to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/3be21fc0281c4d85ae32ca41ae61eb7e 2024-11-20T17:21:32,005 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/c19359efc8ac44d9be70d9e5671ce69c to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/c19359efc8ac44d9be70d9e5671ce69c 2024-11-20T17:21:32,006 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/60eb3fff24f945e18bb06242cc949678 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/60eb3fff24f945e18bb06242cc949678 2024-11-20T17:21:32,007 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/82401d6cfa4b47948d317c76c28a6e71 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/82401d6cfa4b47948d317c76c28a6e71 2024-11-20T17:21:32,008 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/094edeb7be964ef3a128c78b86a9299d to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/094edeb7be964ef3a128c78b86a9299d 2024-11-20T17:21:32,009 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/8506204ed40840af867b5bfaac9ff469 to 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/8506204ed40840af867b5bfaac9ff469 2024-11-20T17:21:32,010 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/cd499cf7922d4b828941dedf78adad9a to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/cd499cf7922d4b828941dedf78adad9a 2024-11-20T17:21:32,011 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/3f66119f3b1d4296bb0580926a6cde9a to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/3f66119f3b1d4296bb0580926a6cde9a 2024-11-20T17:21:32,013 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/5432d4ce26a14b92ae3b5875b3e7061c to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/5432d4ce26a14b92ae3b5875b3e7061c 2024-11-20T17:21:32,014 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/3319e269e5624cef8f6035d1b144021d to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/3319e269e5624cef8f6035d1b144021d 2024-11-20T17:21:32,015 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/007108958e064f0087ed6dad51783e63 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/007108958e064f0087ed6dad51783e63 2024-11-20T17:21:32,016 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/fbc86b289f3145419972b888b135353b to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/fbc86b289f3145419972b888b135353b 2024-11-20T17:21:32,017 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/29022e865fe044c0b5adc47050721f51 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/29022e865fe044c0b5adc47050721f51 2024-11-20T17:21:32,018 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/83b55334c3654a02a04cfa676662e5a3 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/83b55334c3654a02a04cfa676662e5a3 2024-11-20T17:21:32,020 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/742de9f588ef4ec595f02fa68b809209 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/742de9f588ef4ec595f02fa68b809209 2024-11-20T17:21:32,021 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/e2d52ea2226141e59c9bd3e0432357bb to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/e2d52ea2226141e59c9bd3e0432357bb 2024-11-20T17:21:32,022 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/e3277cac91dc4a14b66d9926404e0954 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/e3277cac91dc4a14b66d9926404e0954 2024-11-20T17:21:32,024 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/6483edd9be424880bace3c4c612dff83 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/6483edd9be424880bace3c4c612dff83 2024-11-20T17:21:32,025 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/07578e696ef546cfb8fd81d6ad72d5f2 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/07578e696ef546cfb8fd81d6ad72d5f2 2024-11-20T17:21:32,026 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/a423e8be77b744b985544a3808f085f6 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/a423e8be77b744b985544a3808f085f6 2024-11-20T17:21:32,027 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/9bd615511bda4a268c0371a7698cfd55 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/9bd615511bda4a268c0371a7698cfd55 2024-11-20T17:21:32,028 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/1456a3a49ac34efea63ca64ed5f91c59 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/1456a3a49ac34efea63ca64ed5f91c59 2024-11-20T17:21:32,029 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/f678be0fa2514abfa68a6135ff608982 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/f678be0fa2514abfa68a6135ff608982 2024-11-20T17:21:32,031 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/a876dd9fe3af40c080008bb1fb32ac99 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/a876dd9fe3af40c080008bb1fb32ac99 2024-11-20T17:21:32,032 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/2804cd3b15024815839b63ebd0d38071 to 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/2804cd3b15024815839b63ebd0d38071 2024-11-20T17:21:32,034 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/4182e61fb20f41b3b91e11050fc21092 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/4182e61fb20f41b3b91e11050fc21092 2024-11-20T17:21:32,035 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/e701033ab6d24f2f83b8755bcf94766e to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/e701033ab6d24f2f83b8755bcf94766e 2024-11-20T17:21:32,036 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/1ab8d41635c6461ba6a71e2c360ddc9f to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/1ab8d41635c6461ba6a71e2c360ddc9f 2024-11-20T17:21:32,038 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/41bba098689349e0b17c1084def688d5 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/41bba098689349e0b17c1084def688d5 2024-11-20T17:21:32,039 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/97975bdb221f47d7b280aabaddaf2e0b to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/97975bdb221f47d7b280aabaddaf2e0b 2024-11-20T17:21:32,040 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/1c0c8e6435de4ed698839bd75129dcca to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/1c0c8e6435de4ed698839bd75129dcca 2024-11-20T17:21:32,041 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/3abbdc6b4bdd4a799ba8f74975ca16c7 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/3abbdc6b4bdd4a799ba8f74975ca16c7 2024-11-20T17:21:32,042 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/ff0733c2ba794e0b84dd1a60891457ac to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/ff0733c2ba794e0b84dd1a60891457ac 2024-11-20T17:21:32,044 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/4f33de9edc4a47c3a53493a64f16b164 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/4f33de9edc4a47c3a53493a64f16b164 2024-11-20T17:21:32,058 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/7a65b29e11794baab57ec8ae20595449, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/a32eec7457e7479f8b6ecaa5172a56f8, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/f14c0c28cfb74c9fb783971b2dc3f28e, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/80abe4187c2f4b8eaf86e86cdd3e5f5e, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/6536699df0a64897886d61816d14673b, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/5fce7c2a1dcf4289bdfa16066b0b3fb2, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/aed7419f0e054fb7a9df55a0b797c4ae, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/884d3aeda8c54667955d64526febbafc, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/2f36e6eea3474788a53990a3c718a789, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/07f435337b3b4e3cbdf5030e1a12c437, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/245cadb77dc34dc08bd806d7c359c6e7, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/f1068ead3f874160b685d97d69bb8f95, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/046a31aaec7246dd8fcffd7c75ea9295, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/f18a3fe3c4914673b5309a7a25e95aed, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/a89459494c554b1fbf7de61aaf28362a, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/a3a95f17fe3c44cdb9e94a625a64ae91, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/1d268ddfc69f47a1a48e226cc09e5076, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/d32617edf68245b5b218c4ebe446709d, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/9e40c9fea11f4b3187dc71a86995127e, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/43c2d327c83b4c2c8f8dfb57a4d254d2, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/09c7ace4dacc48cf87ce1bb86dce3dc9, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/f17806586d264c3fb82f1948aa37a29a, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/7aa054b5f866455e932a61ba9b46f9d1, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/205284ea80a0416baeb7da2ad8512d85, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/c341bc43f8ed4041922c7d211e51b217, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/85a19cd7a7504fdcbd64f9799acae102, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/59c21e97516a49728010d8e2de8f2511, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/df9b57b011784809bdee989f502b6c8e, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/b02629e3a7c349ccbf91f120dfd63b05, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/289338eddda4495bbd493a7d9e318c7d, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/0c7f794644204c31baf8917ffe9c4d6b, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/e7c93b96fe5b49929375dd85424a585d, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/1871216eae244a4897d420c2259169fc, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/492ed36b379e44ae82b029d10ef43c1c, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/02342d7b6cb14736abacf01b9856d345, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/ed4f80081bea407ab7fc3da0454083aa, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/e8634455f57f472a9829010bbdadd848, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/dd57aa1288c343928f9e6b3b6556c84d, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/7409bce27091450fa4faf57149cd7eff, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/69a7419a6f4743618c3347e291313081, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/bfab5e9e11f94201bf4b45c201a5e5b1] to archive 2024-11-20T17:21:32,059 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T17:21:32,060 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/7a65b29e11794baab57ec8ae20595449 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/7a65b29e11794baab57ec8ae20595449 2024-11-20T17:21:32,062 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/a32eec7457e7479f8b6ecaa5172a56f8 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/a32eec7457e7479f8b6ecaa5172a56f8 2024-11-20T17:21:32,063 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/f14c0c28cfb74c9fb783971b2dc3f28e to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/f14c0c28cfb74c9fb783971b2dc3f28e 2024-11-20T17:21:32,065 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/80abe4187c2f4b8eaf86e86cdd3e5f5e to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/80abe4187c2f4b8eaf86e86cdd3e5f5e 2024-11-20T17:21:32,066 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/6536699df0a64897886d61816d14673b to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/6536699df0a64897886d61816d14673b 2024-11-20T17:21:32,067 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/5fce7c2a1dcf4289bdfa16066b0b3fb2 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/5fce7c2a1dcf4289bdfa16066b0b3fb2 2024-11-20T17:21:32,069 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/aed7419f0e054fb7a9df55a0b797c4ae to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/aed7419f0e054fb7a9df55a0b797c4ae 2024-11-20T17:21:32,070 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/884d3aeda8c54667955d64526febbafc to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/884d3aeda8c54667955d64526febbafc 2024-11-20T17:21:32,071 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/2f36e6eea3474788a53990a3c718a789 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/2f36e6eea3474788a53990a3c718a789 2024-11-20T17:21:32,073 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/07f435337b3b4e3cbdf5030e1a12c437 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/07f435337b3b4e3cbdf5030e1a12c437 2024-11-20T17:21:32,074 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/245cadb77dc34dc08bd806d7c359c6e7 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/245cadb77dc34dc08bd806d7c359c6e7 2024-11-20T17:21:32,075 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/f1068ead3f874160b685d97d69bb8f95 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/f1068ead3f874160b685d97d69bb8f95 2024-11-20T17:21:32,076 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/046a31aaec7246dd8fcffd7c75ea9295 to 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/046a31aaec7246dd8fcffd7c75ea9295 2024-11-20T17:21:32,077 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/f18a3fe3c4914673b5309a7a25e95aed to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/f18a3fe3c4914673b5309a7a25e95aed 2024-11-20T17:21:32,078 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/a89459494c554b1fbf7de61aaf28362a to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/a89459494c554b1fbf7de61aaf28362a 2024-11-20T17:21:32,079 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/a3a95f17fe3c44cdb9e94a625a64ae91 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/a3a95f17fe3c44cdb9e94a625a64ae91 2024-11-20T17:21:32,080 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/1d268ddfc69f47a1a48e226cc09e5076 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/1d268ddfc69f47a1a48e226cc09e5076 2024-11-20T17:21:32,081 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/d32617edf68245b5b218c4ebe446709d to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/d32617edf68245b5b218c4ebe446709d 2024-11-20T17:21:32,082 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/9e40c9fea11f4b3187dc71a86995127e to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/9e40c9fea11f4b3187dc71a86995127e 2024-11-20T17:21:32,083 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/43c2d327c83b4c2c8f8dfb57a4d254d2 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/43c2d327c83b4c2c8f8dfb57a4d254d2 2024-11-20T17:21:32,084 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/09c7ace4dacc48cf87ce1bb86dce3dc9 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/09c7ace4dacc48cf87ce1bb86dce3dc9 2024-11-20T17:21:32,085 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/f17806586d264c3fb82f1948aa37a29a to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/f17806586d264c3fb82f1948aa37a29a 2024-11-20T17:21:32,087 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/7aa054b5f866455e932a61ba9b46f9d1 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/7aa054b5f866455e932a61ba9b46f9d1 2024-11-20T17:21:32,088 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/205284ea80a0416baeb7da2ad8512d85 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/205284ea80a0416baeb7da2ad8512d85 2024-11-20T17:21:32,089 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/c341bc43f8ed4041922c7d211e51b217 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/c341bc43f8ed4041922c7d211e51b217 2024-11-20T17:21:32,090 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/85a19cd7a7504fdcbd64f9799acae102 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/85a19cd7a7504fdcbd64f9799acae102 2024-11-20T17:21:32,091 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/59c21e97516a49728010d8e2de8f2511 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/59c21e97516a49728010d8e2de8f2511 2024-11-20T17:21:32,092 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/df9b57b011784809bdee989f502b6c8e to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/df9b57b011784809bdee989f502b6c8e 2024-11-20T17:21:32,093 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/b02629e3a7c349ccbf91f120dfd63b05 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/b02629e3a7c349ccbf91f120dfd63b05 2024-11-20T17:21:32,094 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/289338eddda4495bbd493a7d9e318c7d to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/289338eddda4495bbd493a7d9e318c7d 2024-11-20T17:21:32,095 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/0c7f794644204c31baf8917ffe9c4d6b to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/0c7f794644204c31baf8917ffe9c4d6b 2024-11-20T17:21:32,096 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/e7c93b96fe5b49929375dd85424a585d to 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/e7c93b96fe5b49929375dd85424a585d 2024-11-20T17:21:32,097 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/1871216eae244a4897d420c2259169fc to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/1871216eae244a4897d420c2259169fc 2024-11-20T17:21:32,098 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/492ed36b379e44ae82b029d10ef43c1c to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/492ed36b379e44ae82b029d10ef43c1c 2024-11-20T17:21:32,099 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/02342d7b6cb14736abacf01b9856d345 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/02342d7b6cb14736abacf01b9856d345 2024-11-20T17:21:32,100 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/ed4f80081bea407ab7fc3da0454083aa to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/ed4f80081bea407ab7fc3da0454083aa 2024-11-20T17:21:32,101 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/e8634455f57f472a9829010bbdadd848 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/e8634455f57f472a9829010bbdadd848 2024-11-20T17:21:32,102 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/dd57aa1288c343928f9e6b3b6556c84d to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/dd57aa1288c343928f9e6b3b6556c84d 2024-11-20T17:21:32,104 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/7409bce27091450fa4faf57149cd7eff to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/7409bce27091450fa4faf57149cd7eff 2024-11-20T17:21:32,105 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/69a7419a6f4743618c3347e291313081 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/69a7419a6f4743618c3347e291313081 2024-11-20T17:21:32,106 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/bfab5e9e11f94201bf4b45c201a5e5b1 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/bfab5e9e11f94201bf4b45c201a5e5b1 2024-11-20T17:21:32,107 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/77ff299657664e7eb936e034185276db, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/354a8aadb9a94e79ab23b3452dfb2368, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/5e0d71e3c6d14e57b673769ad705ab97, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/2e2694992d274051ad4cd3df0ffd2122, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/37976975e66742d7b85bea9c0dbefb95, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/9e641872f30d4c47bddab4c53952a351, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/19c4c3b1863141d0bc5507a17dc8ea30, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/bab6ddf4fe0847f28d579fae3991663e, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/6dbac4fac49c47ea8dfffa352c36149c, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/59b0d8c85427441c9a6abd22ad1453a2, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/3d57f0011fe14077b58502f91aeb5db4, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/e2de71420de044feb9dbffe027691976, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/9b9286096653477da8a661a2e3329064, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/e3c0182e0a624046bc0dae7796218adf, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/d071b6e7ce9f4157a63280535aa1aed9, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/ab2b704c00284dc9a1df886da43deb1a, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/988d60d2c45c455dba9bcab0559c2fec, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/81189d68417b48098491148bc27eb54e, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/29d0b5c73719449284ac177fbac5dde1, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/d861605c95624608bb296ce3a85d411c, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/b8a7c0a49b434533b3f6358b66193811, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/3a759007ac604b19b51609fd62d238cd, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/efd7fb383f1040868bc39ad86ccdf215, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/be11974f3c2b430a8e145cb883e5df07, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/e7de7ce26ad24469b5d8666f90970233, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/6e6542e4f6944ac593934f7589fc464e, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/82b77a6aaccb4feea9263e1281e81fc2, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/72979612bb9840e9a5fca97f5a15ed19, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/f904697d903244ca94a8c78b9015ec33, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/096598ba46174ca8a539f1e447eb1889, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/0de2b7dfc704455c9ef45d241e962888, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/3a659f1508f34ce2b780d902f5d962c5, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/babfa8d496e8441c95ae6a1c0b75397b, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/b0626bbfe70a48a18262cf97b624ac59, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/8a89126b9e60495b9bab6722c40ef07f] to archive 2024-11-20T17:21:32,108 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T17:21:32,110 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/77ff299657664e7eb936e034185276db to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/77ff299657664e7eb936e034185276db 2024-11-20T17:21:32,111 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/354a8aadb9a94e79ab23b3452dfb2368 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/354a8aadb9a94e79ab23b3452dfb2368 2024-11-20T17:21:32,112 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/5e0d71e3c6d14e57b673769ad705ab97 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/5e0d71e3c6d14e57b673769ad705ab97 2024-11-20T17:21:32,113 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/2e2694992d274051ad4cd3df0ffd2122 to 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/2e2694992d274051ad4cd3df0ffd2122 2024-11-20T17:21:32,114 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/37976975e66742d7b85bea9c0dbefb95 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/37976975e66742d7b85bea9c0dbefb95 2024-11-20T17:21:32,115 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/9e641872f30d4c47bddab4c53952a351 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/9e641872f30d4c47bddab4c53952a351 2024-11-20T17:21:32,116 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/19c4c3b1863141d0bc5507a17dc8ea30 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/19c4c3b1863141d0bc5507a17dc8ea30 2024-11-20T17:21:32,117 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/bab6ddf4fe0847f28d579fae3991663e to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/bab6ddf4fe0847f28d579fae3991663e 2024-11-20T17:21:32,118 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/6dbac4fac49c47ea8dfffa352c36149c to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/6dbac4fac49c47ea8dfffa352c36149c 2024-11-20T17:21:32,119 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/59b0d8c85427441c9a6abd22ad1453a2 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/59b0d8c85427441c9a6abd22ad1453a2 2024-11-20T17:21:32,120 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/3d57f0011fe14077b58502f91aeb5db4 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/3d57f0011fe14077b58502f91aeb5db4 2024-11-20T17:21:32,121 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/e2de71420de044feb9dbffe027691976 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/e2de71420de044feb9dbffe027691976 2024-11-20T17:21:32,122 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/9b9286096653477da8a661a2e3329064 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/9b9286096653477da8a661a2e3329064 2024-11-20T17:21:32,123 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/e3c0182e0a624046bc0dae7796218adf to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/e3c0182e0a624046bc0dae7796218adf 2024-11-20T17:21:32,124 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/d071b6e7ce9f4157a63280535aa1aed9 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/d071b6e7ce9f4157a63280535aa1aed9 2024-11-20T17:21:32,126 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/ab2b704c00284dc9a1df886da43deb1a to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/ab2b704c00284dc9a1df886da43deb1a 2024-11-20T17:21:32,127 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/988d60d2c45c455dba9bcab0559c2fec to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/988d60d2c45c455dba9bcab0559c2fec 2024-11-20T17:21:32,128 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/81189d68417b48098491148bc27eb54e to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/81189d68417b48098491148bc27eb54e 2024-11-20T17:21:32,129 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/29d0b5c73719449284ac177fbac5dde1 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/29d0b5c73719449284ac177fbac5dde1 2024-11-20T17:21:32,130 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/d861605c95624608bb296ce3a85d411c to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/d861605c95624608bb296ce3a85d411c 2024-11-20T17:21:32,131 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/b8a7c0a49b434533b3f6358b66193811 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/b8a7c0a49b434533b3f6358b66193811 2024-11-20T17:21:32,134 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/3a759007ac604b19b51609fd62d238cd to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/3a759007ac604b19b51609fd62d238cd 2024-11-20T17:21:32,135 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/efd7fb383f1040868bc39ad86ccdf215 to 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/efd7fb383f1040868bc39ad86ccdf215 2024-11-20T17:21:32,137 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/be11974f3c2b430a8e145cb883e5df07 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/be11974f3c2b430a8e145cb883e5df07 2024-11-20T17:21:32,138 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/e7de7ce26ad24469b5d8666f90970233 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/e7de7ce26ad24469b5d8666f90970233 2024-11-20T17:21:32,139 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/6e6542e4f6944ac593934f7589fc464e to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/6e6542e4f6944ac593934f7589fc464e 2024-11-20T17:21:32,140 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/82b77a6aaccb4feea9263e1281e81fc2 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/82b77a6aaccb4feea9263e1281e81fc2 2024-11-20T17:21:32,142 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/72979612bb9840e9a5fca97f5a15ed19 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/72979612bb9840e9a5fca97f5a15ed19 2024-11-20T17:21:32,143 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/f904697d903244ca94a8c78b9015ec33 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/f904697d903244ca94a8c78b9015ec33 2024-11-20T17:21:32,144 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/096598ba46174ca8a539f1e447eb1889 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/096598ba46174ca8a539f1e447eb1889 2024-11-20T17:21:32,146 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/0de2b7dfc704455c9ef45d241e962888 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/0de2b7dfc704455c9ef45d241e962888 2024-11-20T17:21:32,147 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/3a659f1508f34ce2b780d902f5d962c5 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/3a659f1508f34ce2b780d902f5d962c5 2024-11-20T17:21:32,149 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/babfa8d496e8441c95ae6a1c0b75397b to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/babfa8d496e8441c95ae6a1c0b75397b 2024-11-20T17:21:32,153 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/b0626bbfe70a48a18262cf97b624ac59 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/b0626bbfe70a48a18262cf97b624ac59 2024-11-20T17:21:32,154 DEBUG [StoreCloser-TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/8a89126b9e60495b9bab6722c40ef07f to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/8a89126b9e60495b9bab6722c40ef07f 2024-11-20T17:21:32,160 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] wal.WALSplitUtil(409): Wrote 
file=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/recovered.edits/580.seqid, newMaxSeqId=580, maxSeqId=1
2024-11-20T17:21:32,163 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.
2024-11-20T17:21:32,163 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1635): Region close journal for d436a1ae301ec26cf78d29bd05a18bd2:
2024-11-20T17:21:32,165 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] handler.UnassignRegionHandler(170): Closed d436a1ae301ec26cf78d29bd05a18bd2
2024-11-20T17:21:32,165 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=d436a1ae301ec26cf78d29bd05a18bd2, regionState=CLOSED
2024-11-20T17:21:32,168 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=39, resume processing ppid=38
2024-11-20T17:21:32,168 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; CloseRegionProcedure d436a1ae301ec26cf78d29bd05a18bd2, server=d514dc944523,40121,1732123262111 in 1.9880 sec
2024-11-20T17:21:32,169 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=38, resume processing ppid=37
2024-11-20T17:21:32,169 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, ppid=37, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=d436a1ae301ec26cf78d29bd05a18bd2, UNASSIGN in 1.9920 sec
2024-11-20T17:21:32,171 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=36
2024-11-20T17:21:32,171 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=36, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.9980 sec
2024-11-20T17:21:32,172 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123292171"}]},"ts":"1732123292171"}
2024-11-20T17:21:32,173 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta
2024-11-20T17:21:32,175 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED
2024-11-20T17:21:32,177 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.0150 sec
2024-11-20T17:21:32,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36
2024-11-20T17:21:32,270 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 36 completed
2024-11-20T17:21:32,273 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees
2024-11-20T17:21:32,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=40, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees
2024-11-20T17:21:32,278 DEBUG [PEWorker-3 {}]
procedure.DeleteTableProcedure(103): Waiting for RIT for pid=40, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:21:32,279 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=40, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:21:32,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=40 2024-11-20T17:21:32,282 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2 2024-11-20T17:21:32,286 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A, FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B, FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C, FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/recovered.edits] 2024-11-20T17:21:32,290 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/3f818f8952104dcc8cd19ea3f6346c62 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/3f818f8952104dcc8cd19ea3f6346c62 2024-11-20T17:21:32,291 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/ea60015ea0c34d36862f6ce53069c4af to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/A/ea60015ea0c34d36862f6ce53069c4af 2024-11-20T17:21:32,294 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/6cecd4caaea043edbc4a4039745f6369 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/6cecd4caaea043edbc4a4039745f6369 2024-11-20T17:21:32,295 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/d742c932d0ca44ce99130071fb2473ed to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/B/d742c932d0ca44ce99130071fb2473ed 2024-11-20T17:21:32,298 DEBUG [HFileArchiver-1 {}] 
backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/15b15524d0ff43d8b8efec9489758b1f to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/15b15524d0ff43d8b8efec9489758b1f 2024-11-20T17:21:32,299 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/199d8ed7b5894d67b99f4074beac9070 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/199d8ed7b5894d67b99f4074beac9070 2024-11-20T17:21:32,300 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/7093bafb3e62455e9d77c640a7106669 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/7093bafb3e62455e9d77c640a7106669 2024-11-20T17:21:32,301 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/aefd5af5673a418d9266194ff822e57f to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/aefd5af5673a418d9266194ff822e57f 2024-11-20T17:21:32,303 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/c52d5cd29657429b8b3df3f4f4b2754e to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/c52d5cd29657429b8b3df3f4f4b2754e 2024-11-20T17:21:32,304 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/c75bdb4ab61949238d22a0971a5132db to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/C/c75bdb4ab61949238d22a0971a5132db 2024-11-20T17:21:32,307 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/recovered.edits/580.seqid to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2/recovered.edits/580.seqid 2024-11-20T17:21:32,308 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2 
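The HFileArchiver records above follow a move-then-delete pattern: every store file under data/default/TestAcidGuarantees/<region>/<family> is renamed into the mirrored path under archive/, and only once every family (and recovered.edits) has been archived is the emptied region directory removed, which is the final "HFileArchiver(610): Deleted ..." record. The sketch below illustrates that layout with the plain Hadoop FileSystem API; the class and method names are invented for illustration, this is not HBase's HFileArchiver code, and the main method points at the mini-cluster NameNode address from this log, so it is only meaningful while that cluster is running.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch only (not HBase's HFileArchiver): mirrors the layout in the
// log, where data/default/<table>/<region>/<family>/<hfile> is moved to
// archive/data/default/<table>/<region>/<family>/<hfile> and the emptied region
// directory is then deleted.
public class ArchiveRegionSketch {

  static void archiveRegion(FileSystem fs, Path rootDir, Path regionDir) throws Exception {
    // Region path relative to the test-data root, e.g. data/default/TestAcidGuarantees/<region>
    String rel = regionDir.toUri().getPath()
        .substring(rootDir.toUri().getPath().length() + 1);
    for (FileStatus family : fs.listStatus(regionDir)) {      // A, B, C, recovered.edits
      Path archiveFamily = new Path(rootDir, "archive/" + rel + "/" + family.getPath().getName());
      fs.mkdirs(archiveFamily);
      for (FileStatus hfile : fs.listStatus(family.getPath())) {
        // Move (rename) each store file into the archive, keeping its name.
        fs.rename(hfile.getPath(), new Path(archiveFamily, hfile.getPath().getName()));
      }
    }
    // Matches the final "HFileArchiver(610): Deleted <region dir>" record above.
    fs.delete(regionDir, true);
  }

  public static void main(String[] args) throws Exception {
    // NameNode address and paths taken from this log; valid only while the test cluster is up.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:41637"), new Configuration());
    Path root = new Path("/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0");
    archiveRegion(fs, root,
        new Path(root, "data/default/TestAcidGuarantees/d436a1ae301ec26cf78d29bd05a18bd2"));
  }
}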
2024-11-20T17:21:32,308 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions
2024-11-20T17:21:32,313 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=40, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees
2024-11-20T17:21:32,317 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms
2024-11-20T17:21:32,322 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta
2024-11-20T17:21:32,353 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor.
2024-11-20T17:21:32,355 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=40, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees
2024-11-20T17:21:32,355 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states.
2024-11-20T17:21:32,355 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732123292355"}]},"ts":"9223372036854775807"}
2024-11-20T17:21:32,358 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META
2024-11-20T17:21:32,358 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => d436a1ae301ec26cf78d29bd05a18bd2, NAME => 'TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2.', STARTKEY => '', ENDKEY => ''}]
2024-11-20T17:21:32,358 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted.
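The procedure chain recorded in this run (DisableTableProcedure pid=36 completing, then DeleteTableProcedure pid=40 archiving regions, cleaning hbase:meta, and removing the descriptor) is what the standard HBase client Admin API drives from the test side. A minimal client-side sketch follows; the class name and the configuration source are assumptions, not taken from the test code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Minimal client-side sketch of the disable/delete sequence the master executes above;
// the class name and configuration source are assumptions, not part of the test.
public class DropTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // reads hbase-site.xml from the classpath
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      if (admin.tableExists(table)) {
        if (admin.isTableEnabled(table)) {
          admin.disableTable(table);  // master runs a DisableTableProcedure (pid=36 in this log)
        }
        admin.deleteTable(table);     // master runs a DeleteTableProcedure (pid=40 in this log)
      }
    }
  }
}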
2024-11-20T17:21:32,358 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732123292358"}]},"ts":"9223372036854775807"} 2024-11-20T17:21:32,360 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T17:21:32,363 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=40, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:21:32,364 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 89 msec 2024-11-20T17:21:32,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=40 2024-11-20T17:21:32,381 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 40 completed 2024-11-20T17:21:32,392 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMixedAtomicity Thread=241 (was 219) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x5602a74-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1161993582_22 at /127.0.0.1:52848 [Waiting for operation #318] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/d514dc944523:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_227606640_22 at /127.0.0.1:38390 [Waiting for operation #296] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x5602a74-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x5602a74-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: RS:0;d514dc944523:40121-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: 
RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/d514dc944523:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x5602a74-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_227606640_22 at /127.0.0.1:41404 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1161993582_22 at /127.0.0.1:51858 [Waiting for operation #185] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=461 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=239 (was 139) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6335 (was 6890) 2024-11-20T17:21:32,401 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobMixedAtomicity Thread=241, OpenFileDescriptor=461, MaxFileDescriptor=1048576, SystemLoadAverage=239, ProcessCount=11, AvailableMemoryMB=6335 2024-11-20T17:21:32,403 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-20T17:21:32,403 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T17:21:32,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=41, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T17:21:32,405 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T17:21:32,405 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:32,405 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 41 2024-11-20T17:21:32,406 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; 
CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T17:21:32,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-11-20T17:21:32,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741966_1142 (size=960) 2024-11-20T17:21:32,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-11-20T17:21:32,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-11-20T17:21:32,814 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0 2024-11-20T17:21:32,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741967_1143 (size=53) 2024-11-20T17:21:33,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-11-20T17:21:33,220 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:21:33,221 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 51c47a20e94658a652843ed744178633, disabling compactions & flushes 2024-11-20T17:21:33,221 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:33,221 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 
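The create request logged above (master.HMaster$4, stored as pid=41) spells out the full descriptor for TestAcidGuarantees: the table-level metadata key 'hbase.hregion.compacting.memstore.type' => 'BASIC' plus three column families A, B and C, each with VERSIONS => '1' and 64 KB blocks. As a hedged illustration only, the sketch below shows how a client could build and submit an equivalent descriptor through the HBase 2.x Admin API; the connection setup and the class name are assumptions for the example, not something taken from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAcidTableSketch {
  public static void main(String[] args) throws Exception {
    // Assumption: hbase-site.xml for the target cluster is on the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder tdb = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestAcidGuarantees"))
          // Table-level metadata shown in the logged descriptor.
          .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
      for (String family : new String[] {"A", "B", "C"}) {
        ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes(family))
            .setMaxVersions(1)        // VERSIONS => '1'
            .setBlocksize(64 * 1024)  // BLOCKSIZE => '65536 B (64KB)'
            .build();
        tdb.setColumnFamily(cfd);
      }
      // Blocks until the master's CreateTableProcedure completes; the repeated
      // "Checking to see if procedure is done pid=..." entries are the master's
      // side of that wait.
      admin.createTable(tdb.build());
    }
  }
}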
2024-11-20T17:21:33,221 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. after waiting 0 ms 2024-11-20T17:21:33,221 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:33,221 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:33,221 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 51c47a20e94658a652843ed744178633: 2024-11-20T17:21:33,222 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T17:21:33,222 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732123293222"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732123293222"}]},"ts":"1732123293222"} 2024-11-20T17:21:33,224 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T17:21:33,224 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T17:21:33,225 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123293224"}]},"ts":"1732123293224"} 2024-11-20T17:21:33,226 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T17:21:33,231 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=51c47a20e94658a652843ed744178633, ASSIGN}] 2024-11-20T17:21:33,232 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=51c47a20e94658a652843ed744178633, ASSIGN 2024-11-20T17:21:33,233 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=51c47a20e94658a652843ed744178633, ASSIGN; state=OFFLINE, location=d514dc944523,40121,1732123262111; forceNewPlan=false, retain=false 2024-11-20T17:21:33,383 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=51c47a20e94658a652843ed744178633, regionState=OPENING, regionLocation=d514dc944523,40121,1732123262111 2024-11-20T17:21:33,385 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=43, ppid=42, state=RUNNABLE; OpenRegionProcedure 51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111}] 2024-11-20T17:21:33,510 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-11-20T17:21:33,537 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:33,540 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:33,540 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(7285): Opening region: {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} 2024-11-20T17:21:33,541 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 51c47a20e94658a652843ed744178633 2024-11-20T17:21:33,541 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:21:33,541 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(7327): checking encryption for 51c47a20e94658a652843ed744178633 2024-11-20T17:21:33,541 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(7330): checking classloading for 51c47a20e94658a652843ed744178633 2024-11-20T17:21:33,542 INFO [StoreOpener-51c47a20e94658a652843ed744178633-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 51c47a20e94658a652843ed744178633 2024-11-20T17:21:33,543 INFO [StoreOpener-51c47a20e94658a652843ed744178633-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:21:33,544 INFO [StoreOpener-51c47a20e94658a652843ed744178633-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 51c47a20e94658a652843ed744178633 columnFamilyName A 2024-11-20T17:21:33,544 DEBUG [StoreOpener-51c47a20e94658a652843ed744178633-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:33,544 INFO 
[StoreOpener-51c47a20e94658a652843ed744178633-1 {}] regionserver.HStore(327): Store=51c47a20e94658a652843ed744178633/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:21:33,544 INFO [StoreOpener-51c47a20e94658a652843ed744178633-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 51c47a20e94658a652843ed744178633 2024-11-20T17:21:33,545 INFO [StoreOpener-51c47a20e94658a652843ed744178633-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:21:33,546 INFO [StoreOpener-51c47a20e94658a652843ed744178633-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 51c47a20e94658a652843ed744178633 columnFamilyName B 2024-11-20T17:21:33,546 DEBUG [StoreOpener-51c47a20e94658a652843ed744178633-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:33,547 INFO [StoreOpener-51c47a20e94658a652843ed744178633-1 {}] regionserver.HStore(327): Store=51c47a20e94658a652843ed744178633/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:21:33,547 INFO [StoreOpener-51c47a20e94658a652843ed744178633-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 51c47a20e94658a652843ed744178633 2024-11-20T17:21:33,548 INFO [StoreOpener-51c47a20e94658a652843ed744178633-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:21:33,548 INFO [StoreOpener-51c47a20e94658a652843ed744178633-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 51c47a20e94658a652843ed744178633 columnFamilyName C 2024-11-20T17:21:33,548 DEBUG [StoreOpener-51c47a20e94658a652843ed744178633-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:33,548 INFO [StoreOpener-51c47a20e94658a652843ed744178633-1 {}] regionserver.HStore(327): Store=51c47a20e94658a652843ed744178633/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:21:33,549 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:33,549 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633 2024-11-20T17:21:33,550 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633 2024-11-20T17:21:33,551 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T17:21:33,552 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(1085): writing seq id for 51c47a20e94658a652843ed744178633 2024-11-20T17:21:33,554 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T17:21:33,555 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(1102): Opened 51c47a20e94658a652843ed744178633; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74134609, jitterRate=0.1046917587518692}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T17:21:33,555 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(1001): Region open journal for 51c47a20e94658a652843ed744178633: 2024-11-20T17:21:33,556 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633., pid=43, masterSystemTime=1732123293537 2024-11-20T17:21:33,558 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 
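The CompactionConfiguration lines above for families A, B and C all echo the same values at store-open time: minCompactSize 128 MB, minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0, throttle point 2684354560 and major period 604800000 ms. These correspond to the standard HBase compaction settings. The sketch below is a hedged illustration of the configuration keys behind those numbers; the values are set programmatically purely for demonstration, whereas this test run simply relies on the defaults.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class CompactionTuningSketch {
  public static void main(String[] args) {
    // Keys read by CompactionConfiguration; values mirror what the log reports.
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);      // minCompactSize: 128 MB
    conf.setInt("hbase.hstore.compaction.min", 3);                             // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                            // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                      // ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);              // off-peak ratio
    conf.setLong("hbase.regionserver.thread.compaction.throttle", 2684354560L); // throttle point
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);                 // major period: 7 days
    System.out.println("compaction ratio = " + conf.getFloat("hbase.hstore.compaction.ratio", 1.2f));
  }
}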
2024-11-20T17:21:33,558 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:33,558 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=51c47a20e94658a652843ed744178633, regionState=OPEN, openSeqNum=2, regionLocation=d514dc944523,40121,1732123262111 2024-11-20T17:21:33,562 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=43, resume processing ppid=42 2024-11-20T17:21:33,562 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, ppid=42, state=SUCCESS; OpenRegionProcedure 51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 in 175 msec 2024-11-20T17:21:33,564 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=42, resume processing ppid=41 2024-11-20T17:21:33,564 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=41, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=51c47a20e94658a652843ed744178633, ASSIGN in 331 msec 2024-11-20T17:21:33,564 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T17:21:33,565 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123293564"}]},"ts":"1732123293564"} 2024-11-20T17:21:33,566 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T17:21:33,570 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T17:21:33,571 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1670 sec 2024-11-20T17:21:34,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-11-20T17:21:34,511 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 41 completed 2024-11-20T17:21:34,513 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5d29de25 to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5a378df6 2024-11-20T17:21:34,518 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7cca453a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:21:34,519 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:21:34,521 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59558, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:21:34,523 DEBUG [Time-limited test {}] 
ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T17:21:34,525 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60272, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T17:21:34,530 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-20T17:21:34,531 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T17:21:34,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=44, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-20T17:21:34,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741968_1144 (size=996) 2024-11-20T17:21:34,949 DEBUG [PEWorker-2 {}] util.FSTableDescriptors(519): Deleted 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-20T17:21:34,949 INFO [PEWorker-2 {}] util.FSTableDescriptors(297): Updated tableinfo=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-20T17:21:34,953 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=45, ppid=44, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T17:21:34,961 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=51c47a20e94658a652843ed744178633, REOPEN/MOVE}] 2024-11-20T17:21:34,962 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=51c47a20e94658a652843ed744178633, REOPEN/MOVE 2024-11-20T17:21:34,963 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=46 updating hbase:meta row=51c47a20e94658a652843ed744178633, regionState=CLOSING, regionLocation=d514dc944523,40121,1732123262111 2024-11-20T17:21:34,964 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T17:21:34,964 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=47, ppid=46, state=RUNNABLE; CloseRegionProcedure 51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111}] 2024-11-20T17:21:35,115 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:35,116 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] handler.UnassignRegionHandler(124): Close 51c47a20e94658a652843ed744178633 2024-11-20T17:21:35,116 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T17:21:35,116 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1681): Closing 51c47a20e94658a652843ed744178633, disabling compactions & flushes 2024-11-20T17:21:35,116 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:35,116 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:35,116 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. after waiting 0 ms 2024-11-20T17:21:35,116 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 
2024-11-20T17:21:35,121 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-20T17:21:35,121 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:35,121 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1635): Region close journal for 51c47a20e94658a652843ed744178633: 2024-11-20T17:21:35,121 WARN [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegionServer(3786): Not adding moved region record: 51c47a20e94658a652843ed744178633 to self. 2024-11-20T17:21:35,123 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] handler.UnassignRegionHandler(170): Closed 51c47a20e94658a652843ed744178633 2024-11-20T17:21:35,124 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=46 updating hbase:meta row=51c47a20e94658a652843ed744178633, regionState=CLOSED 2024-11-20T17:21:35,126 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=47, resume processing ppid=46 2024-11-20T17:21:35,126 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, ppid=46, state=SUCCESS; CloseRegionProcedure 51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 in 161 msec 2024-11-20T17:21:35,126 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=51c47a20e94658a652843ed744178633, REOPEN/MOVE; state=CLOSED, location=d514dc944523,40121,1732123262111; forceNewPlan=false, retain=true 2024-11-20T17:21:35,277 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=46 updating hbase:meta row=51c47a20e94658a652843ed744178633, regionState=OPENING, regionLocation=d514dc944523,40121,1732123262111 2024-11-20T17:21:35,278 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=46, state=RUNNABLE; OpenRegionProcedure 51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111}] 2024-11-20T17:21:35,432 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:35,435 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 
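Earlier in this run the time-limited test thread opens its own client connection (ReadOnlyZKClient to 127.0.0.1:55266, then SIMPLE-authenticated RPC connections to ClientService and MasterService) before driving the mixed read/write workload. The sketch below is a hedged illustration of the kind of operation whose atomicity TestAcidGuarantees checks: a single Put spanning families A, B and C of one row, followed by a read of that row. The row key, qualifier and values are invented for the example and do not appear in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MultiFamilyPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      byte[] row = Bytes.toBytes("test_row_0");  // hypothetical row key
      byte[] qualifier = Bytes.toBytes("col0");  // hypothetical qualifier
      byte[] value = Bytes.toBytes("value-1");
      // A single Put across several families of one row is applied atomically.
      Put put = new Put(row)
          .addColumn(Bytes.toBytes("A"), qualifier, value)
          .addColumn(Bytes.toBytes("B"), qualifier, value)
          .addColumn(Bytes.toBytes("C"), qualifier, value);
      table.put(put);
      // A concurrent reader should never observe a mix of old and new values
      // across A, B and C for the same row.
      Result result = table.get(new Get(row));
      System.out.println("A=" + Bytes.toString(result.getValue(Bytes.toBytes("A"), qualifier)));
    }
  }
}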
2024-11-20T17:21:35,435 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7285): Opening region: {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} 2024-11-20T17:21:35,435 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 51c47a20e94658a652843ed744178633 2024-11-20T17:21:35,435 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:21:35,436 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7327): checking encryption for 51c47a20e94658a652843ed744178633 2024-11-20T17:21:35,436 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7330): checking classloading for 51c47a20e94658a652843ed744178633 2024-11-20T17:21:35,440 INFO [StoreOpener-51c47a20e94658a652843ed744178633-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 51c47a20e94658a652843ed744178633 2024-11-20T17:21:35,441 INFO [StoreOpener-51c47a20e94658a652843ed744178633-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:21:35,446 INFO [StoreOpener-51c47a20e94658a652843ed744178633-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 51c47a20e94658a652843ed744178633 columnFamilyName A 2024-11-20T17:21:35,448 DEBUG [StoreOpener-51c47a20e94658a652843ed744178633-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:35,448 INFO [StoreOpener-51c47a20e94658a652843ed744178633-1 {}] regionserver.HStore(327): Store=51c47a20e94658a652843ed744178633/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:21:35,449 INFO [StoreOpener-51c47a20e94658a652843ed744178633-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 51c47a20e94658a652843ed744178633 2024-11-20T17:21:35,450 INFO [StoreOpener-51c47a20e94658a652843ed744178633-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:21:35,450 INFO [StoreOpener-51c47a20e94658a652843ed744178633-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 51c47a20e94658a652843ed744178633 columnFamilyName B 2024-11-20T17:21:35,450 DEBUG [StoreOpener-51c47a20e94658a652843ed744178633-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:35,450 INFO [StoreOpener-51c47a20e94658a652843ed744178633-1 {}] regionserver.HStore(327): Store=51c47a20e94658a652843ed744178633/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:21:35,450 INFO [StoreOpener-51c47a20e94658a652843ed744178633-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 51c47a20e94658a652843ed744178633 2024-11-20T17:21:35,451 INFO [StoreOpener-51c47a20e94658a652843ed744178633-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:21:35,451 INFO [StoreOpener-51c47a20e94658a652843ed744178633-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 51c47a20e94658a652843ed744178633 columnFamilyName C 2024-11-20T17:21:35,451 DEBUG [StoreOpener-51c47a20e94658a652843ed744178633-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:35,452 INFO [StoreOpener-51c47a20e94658a652843ed744178633-1 {}] regionserver.HStore(327): Store=51c47a20e94658a652843ed744178633/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:21:35,452 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:35,452 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633 2024-11-20T17:21:35,453 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633 2024-11-20T17:21:35,455 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T17:21:35,456 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1085): writing seq id for 51c47a20e94658a652843ed744178633 2024-11-20T17:21:35,457 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1102): Opened 51c47a20e94658a652843ed744178633; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74585671, jitterRate=0.11141310632228851}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T17:21:35,458 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1001): Region open journal for 51c47a20e94658a652843ed744178633: 2024-11-20T17:21:35,459 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633., pid=48, masterSystemTime=1732123295431 2024-11-20T17:21:35,460 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:35,460 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 
2024-11-20T17:21:35,461 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=46 updating hbase:meta row=51c47a20e94658a652843ed744178633, regionState=OPEN, openSeqNum=5, regionLocation=d514dc944523,40121,1732123262111 2024-11-20T17:21:35,463 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=46 2024-11-20T17:21:35,463 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=46, state=SUCCESS; OpenRegionProcedure 51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 in 184 msec 2024-11-20T17:21:35,464 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=45 2024-11-20T17:21:35,464 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=45, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=51c47a20e94658a652843ed744178633, REOPEN/MOVE in 502 msec 2024-11-20T17:21:35,467 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=45, resume processing ppid=44 2024-11-20T17:21:35,467 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, ppid=44, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 513 msec 2024-11-20T17:21:35,470 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 935 msec 2024-11-20T17:21:35,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=44 2024-11-20T17:21:35,477 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2931c73e to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4c7d6279 2024-11-20T17:21:35,485 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c820ef9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:21:35,486 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x491ea2ee to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5b55744e 2024-11-20T17:21:35,490 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e3a4420, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:21:35,492 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x190853fc to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@a9306be 2024-11-20T17:21:35,495 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24f64590, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:21:35,496 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x46114993 to 
127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@769942d9 2024-11-20T17:21:35,501 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a4c53ed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:21:35,502 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2885d2d9 to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@cb464a 2024-11-20T17:21:35,506 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68f0be85, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:21:35,508 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x78cafade to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@152377d4 2024-11-20T17:21:35,512 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@517ff977, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:21:35,513 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x14c16cd4 to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1a52344f 2024-11-20T17:21:35,517 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3448d233, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:21:35,518 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0341384e to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@8ba8425 2024-11-20T17:21:35,522 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a11164b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:21:35,523 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x26b120d9 to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7af61386 2024-11-20T17:21:35,527 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8a7e1dd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:21:35,531 DEBUG 
[hconnection-0x3f70508f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:21:35,531 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:21:35,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 2024-11-20T17:21:35,532 DEBUG [hconnection-0x175efc9b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:21:35,532 DEBUG [hconnection-0x7fe9104d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:21:35,532 DEBUG [hconnection-0x3f634d8c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:21:35,532 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:21:35,533 DEBUG [hconnection-0x4724447b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:21:35,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-20T17:21:35,533 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:21:35,533 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:21:35,534 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59564, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:21:35,534 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59586, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:21:35,534 DEBUG [hconnection-0x225711f7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:21:35,534 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59596, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:21:35,534 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59574, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:21:35,535 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59612, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:21:35,538 DEBUG [hconnection-0x2e072ca0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ClientService, sasl=false 2024-11-20T17:21:35,539 DEBUG [hconnection-0x28cd1dc0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:21:35,540 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59626, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:21:35,540 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59624, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:21:35,540 DEBUG [hconnection-0x2f10ee6a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:21:35,540 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59634, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:21:35,543 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59636, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:21:35,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 51c47a20e94658a652843ed744178633 2024-11-20T17:21:35,553 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 51c47a20e94658a652843ed744178633 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T17:21:35,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=A 2024-11-20T17:21:35,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:35,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=B 2024-11-20T17:21:35,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:35,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=C 2024-11-20T17:21:35,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:35,599 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411206dfd30535c284573bdf1924f362e13b7_51c47a20e94658a652843ed744178633 is 50, key is test_row_0/A:col10/1732123295545/Put/seqid=0 2024-11-20T17:21:35,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741969_1145 (size=9714) 2024-11-20T17:21:35,608 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:35,613 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411206dfd30535c284573bdf1924f362e13b7_51c47a20e94658a652843ed744178633 to 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411206dfd30535c284573bdf1924f362e13b7_51c47a20e94658a652843ed744178633 2024-11-20T17:21:35,614 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/cd97f675758b403e8c8f7883f22a1ce4, store: [table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:35,622 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/cd97f675758b403e8c8f7883f22a1ce4 is 175, key is test_row_0/A:col10/1732123295545/Put/seqid=0 2024-11-20T17:21:35,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741970_1146 (size=22361) 2024-11-20T17:21:35,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-20T17:21:35,640 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:35,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123355583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:35,644 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:35,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59636 deadline: 1732123355588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:35,648 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:35,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123355640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:35,649 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:35,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59586 deadline: 1732123355640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:35,649 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:35,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59596 deadline: 1732123355640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:35,686 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:35,687 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T17:21:35,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:35,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. as already flushing 2024-11-20T17:21:35,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:35,687 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:35,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:35,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:35,744 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:35,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123355743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:35,750 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:35,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59636 deadline: 1732123355749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:35,751 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:35,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123355750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:35,751 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:35,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59586 deadline: 1732123355750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:35,751 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:35,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59596 deadline: 1732123355751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:35,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-20T17:21:35,840 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:35,842 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T17:21:35,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:35,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. as already flushing 2024-11-20T17:21:35,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:35,842 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:21:35,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:35,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:35,948 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:35,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123355947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:35,956 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:35,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59636 deadline: 1732123355953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:35,957 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:35,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59596 deadline: 1732123355953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:35,957 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:35,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123355953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:35,958 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:35,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59586 deadline: 1732123355954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:35,994 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:35,995 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T17:21:35,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:35,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. as already flushing 2024-11-20T17:21:35,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:35,995 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
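Annotation: the burst of RegionTooBusyException warnings above all trace back to HRegion.checkResources: the region's memstore has grown past its blocking threshold (reported here as 512.0 K), so incoming Mutate calls are rejected until the flush already in progress frees memory. That threshold is normally the product of two standard settings, hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier; the unusually small 512 K figure suggests the test lowered the flush size, although the exact test configuration is not visible in this log. A minimal sketch of how those two knobs combine (values are illustrative only, not taken from this run):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimitSketch {
      public static void main(String[] args) {
        // Start from the standard HBase configuration.
        Configuration conf = HBaseConfiguration.create();

        // Illustrative, test-sized values: flush a region memstore at 128 KB ...
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // ... and block new updates once it reaches flush.size * multiplier.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
        // Prints 524288, i.e. the 512.0 K limit reported in the warnings above.
        System.out.println("blocking limit = " + blockingLimit + " bytes");
      }
    }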
2024-11-20T17:21:35,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:35,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:36,031 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=15, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/cd97f675758b403e8c8f7883f22a1ce4 2024-11-20T17:21:36,063 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/50b34e4e4231415c9919344c4dccb0b4 is 50, key is test_row_0/B:col10/1732123295545/Put/seqid=0 2024-11-20T17:21:36,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741971_1147 (size=9657) 2024-11-20T17:21:36,077 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/50b34e4e4231415c9919344c4dccb0b4 2024-11-20T17:21:36,108 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/47105e9ee0e74eb8bde444d62e66f67a is 50, key is test_row_0/C:col10/1732123295545/Put/seqid=0 2024-11-20T17:21:36,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741972_1148 (size=9657) 2024-11-20T17:21:36,124 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/47105e9ee0e74eb8bde444d62e66f67a 2024-11-20T17:21:36,130 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/cd97f675758b403e8c8f7883f22a1ce4 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/cd97f675758b403e8c8f7883f22a1ce4 2024-11-20T17:21:36,136 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/cd97f675758b403e8c8f7883f22a1ce4, entries=100, sequenceid=15, filesize=21.8 K 2024-11-20T17:21:36,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-20T17:21:36,137 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/50b34e4e4231415c9919344c4dccb0b4 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/50b34e4e4231415c9919344c4dccb0b4 2024-11-20T17:21:36,142 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/50b34e4e4231415c9919344c4dccb0b4, entries=100, sequenceid=15, filesize=9.4 K 2024-11-20T17:21:36,143 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/47105e9ee0e74eb8bde444d62e66f67a as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/47105e9ee0e74eb8bde444d62e66f67a 2024-11-20T17:21:36,149 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:36,150 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T17:21:36,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:36,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. as already flushing 2024-11-20T17:21:36,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:36,150 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
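Annotation: the MemStoreFlusher entries above show the two-step commit the flush uses for each store: the new file is first written under the region's .tmp directory (.tmp/A, .tmp/B, .tmp/C) and only afterwards moved into the column-family directory, which is what the HRegionFileSystem "Committing ... as ..." lines record. The sketch below illustrates that write-to-temp-then-rename pattern with the plain Hadoop FileSystem API; it is a simplified stand-in, not HBase's HRegionFileSystem code, and the paths and file name are made up to mirror the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpCommitSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // Illustrative paths modeled on the log: write under .tmp first ...
        Path tmpFile = new Path("/data/default/TestAcidGuarantees/region/.tmp/B/new-hfile");
        Path finalFile = new Path("/data/default/TestAcidGuarantees/region/B/new-hfile");

        try (FSDataOutputStream out = fs.create(tmpFile)) {
          out.writeBytes("flushed cells would go here"); // stand-in for real HFile bytes
        }

        // ... then "commit" by renaming into the store directory in one step,
        // so a half-written file is never visible under B/.
        fs.mkdirs(finalFile.getParent());
        if (!fs.rename(tmpFile, finalFile)) {
          throw new java.io.IOException("commit failed for " + tmpFile);
        }
      }
    }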
2024-11-20T17:21:36,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:36,152 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/47105e9ee0e74eb8bde444d62e66f67a, entries=100, sequenceid=15, filesize=9.4 K 2024-11-20T17:21:36,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:36,153 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 51c47a20e94658a652843ed744178633 in 600ms, sequenceid=15, compaction requested=false 2024-11-20T17:21:36,153 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 51c47a20e94658a652843ed744178633: 2024-11-20T17:21:36,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 51c47a20e94658a652843ed744178633 2024-11-20T17:21:36,265 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 51c47a20e94658a652843ed744178633 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T17:21:36,265 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=A 2024-11-20T17:21:36,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:36,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=B 2024-11-20T17:21:36,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:36,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=C 2024-11-20T17:21:36,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:36,279 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112052f9bc979f5249b28f2b8c669a3692e4_51c47a20e94658a652843ed744178633 is 50, key is test_row_0/A:col10/1732123296262/Put/seqid=0 2024-11-20T17:21:36,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741973_1149 (size=14594) 2024-11-20T17:21:36,295 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:36,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59596 deadline: 1732123356278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:36,296 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:36,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123356280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:36,299 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:36,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123356286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:36,304 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:36,304 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T17:21:36,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:36,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. as already flushing 2024-11-20T17:21:36,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:36,304 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:36,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:36,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:36,305 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:36,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59636 deadline: 1732123356293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:36,306 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:36,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59586 deadline: 1732123356296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:36,399 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:36,399 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:36,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59596 deadline: 1732123356397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:36,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123356398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:36,402 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:36,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123356401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:36,409 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:36,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59636 deadline: 1732123356408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:36,410 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:36,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59586 deadline: 1732123356408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:36,457 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:36,457 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T17:21:36,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 
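Annotation: each time the master re-dispatches the flush procedure (pid=50) while the MemStoreFlusher is still running, the regionserver answers "NOT flushing ... as already flushing" and the callable fails with "Unable to complete flush". Conceptually this is a single-flight guard: only one flush per region runs at a time, and overlapping requests are rejected so they can be retried later. A generic illustration of that pattern is sketched below; it assumes nothing about HBase's actual HRegion internals and is not its implementation.

    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicBoolean;

    public class SingleFlightFlushSketch {
      private final AtomicBoolean flushing = new AtomicBoolean(false);

      // Returns normally only if this call actually performed the flush.
      public void requestFlush() throws IOException {
        if (!flushing.compareAndSet(false, true)) {
          // Mirrors the log's "NOT flushing ... as already flushing" branch:
          // reject the overlapping request so the caller can retry later.
          throw new IOException("Unable to complete flush: already flushing");
        }
        try {
          doFlush();
        } finally {
          flushing.set(false);
        }
      }

      private void doFlush() {
        // Placeholder for the real work (snapshotting memstores, writing .tmp files, etc.).
      }
    }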
2024-11-20T17:21:36,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. as already flushing 2024-11-20T17:21:36,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:36,458 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:36,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
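Annotation: on the master side the rejected flush is simply reported back (see the "Remote procedure failed, pid=50" entry that follows) and dispatched again until the regionserver accepts it, which is why the same pid=50 exchange repeats throughout this log. A client that wanted comparable behaviour around an explicit flush could wrap Admin.flush in its own retry loop; the sketch below is only illustrative, since the HBase client and the master's procedure framework already retry on their own, and the attempt count and backoff here are arbitrary.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushRetrySketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          int attempts = 0;
          while (true) {
            try {
              admin.flush(table); // asks the regionserver(s) to flush the table's regions
              break;              // flush accepted
            } catch (IOException e) {
              // Transient failure; back off and retry, analogous to the master
              // re-dispatching pid=50 in the entries above.
              if (++attempts >= 5) throw e;
              Thread.sleep(200L * attempts); // simple linear backoff
            }
          }
        }
      }
    }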
2024-11-20T17:21:36,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:36,601 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:36,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123356600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:36,602 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:36,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59596 deadline: 1732123356601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:36,607 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:36,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123356605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:36,610 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:36,611 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T17:21:36,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:36,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. as already flushing 2024-11-20T17:21:36,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:36,611 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:36,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:36,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:36,613 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:36,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59636 deadline: 1732123356611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:36,613 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:36,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59586 deadline: 1732123356612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:36,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-20T17:21:36,684 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:36,690 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112052f9bc979f5249b28f2b8c669a3692e4_51c47a20e94658a652843ed744178633 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112052f9bc979f5249b28f2b8c669a3692e4_51c47a20e94658a652843ed744178633 2024-11-20T17:21:36,691 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/8966c1ab1dc74484b595ad46c4105ea6, store: [table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:36,692 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/8966c1ab1dc74484b595ad46c4105ea6 is 175, key is test_row_0/A:col10/1732123296262/Put/seqid=0 2024-11-20T17:21:36,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741974_1150 (size=39549) 2024-11-20T17:21:36,763 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:36,764 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T17:21:36,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 
2024-11-20T17:21:36,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. as already flushing 2024-11-20T17:21:36,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:36,765 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:36,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:21:36,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:36,903 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:36,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59596 deadline: 1732123356903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:36,906 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:36,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123356905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:36,913 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:36,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123356912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:36,916 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:36,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59636 deadline: 1732123356915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:36,917 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:36,918 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T17:21:36,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:36,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. as already flushing 2024-11-20T17:21:36,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:36,918 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:36,918 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:36,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59586 deadline: 1732123356917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:36,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:21:36,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:37,070 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:37,071 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T17:21:37,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:37,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 
as already flushing 2024-11-20T17:21:37,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:37,071 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:37,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:37,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:37,101 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/8966c1ab1dc74484b595ad46c4105ea6 2024-11-20T17:21:37,113 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/050152866c634ec68a2e794ce4d0b2c6 is 50, key is test_row_0/B:col10/1732123296262/Put/seqid=0 2024-11-20T17:21:37,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741975_1151 (size=12001) 2024-11-20T17:21:37,223 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:37,224 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T17:21:37,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:37,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 
as already flushing 2024-11-20T17:21:37,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:37,224 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:37,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:37,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:37,377 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:37,378 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T17:21:37,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:37,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. as already flushing 2024-11-20T17:21:37,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:37,379 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:37,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:37,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:37,406 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:37,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59596 deadline: 1732123357405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:37,411 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:37,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123357410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:37,417 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:37,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123357415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:37,419 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:37,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59636 deadline: 1732123357419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:37,424 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:37,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59586 deadline: 1732123357424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:37,519 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/050152866c634ec68a2e794ce4d0b2c6 2024-11-20T17:21:37,529 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/4d8fee83fb684961a5df56a294312b18 is 50, key is test_row_0/C:col10/1732123296262/Put/seqid=0 2024-11-20T17:21:37,531 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:37,532 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T17:21:37,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:37,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 
as already flushing 2024-11-20T17:21:37,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:37,532 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:37,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:37,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:37,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741976_1152 (size=12001) 2024-11-20T17:21:37,538 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/4d8fee83fb684961a5df56a294312b18 2024-11-20T17:21:37,544 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/8966c1ab1dc74484b595ad46c4105ea6 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/8966c1ab1dc74484b595ad46c4105ea6 2024-11-20T17:21:37,549 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/8966c1ab1dc74484b595ad46c4105ea6, entries=200, sequenceid=41, filesize=38.6 K 2024-11-20T17:21:37,551 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/050152866c634ec68a2e794ce4d0b2c6 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/050152866c634ec68a2e794ce4d0b2c6 
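
[Editor's note] The repeated RegionTooBusyException warnings above are HBase's write back-pressure: HRegion.checkResources rejects new mutations with "Over memstore limit=512.0 K" while the region's memstore is above its blocking limit and an earlier flush is still in flight. That blocking limit is the per-region memstore flush size multiplied by hbase.hregion.memstore.block.multiplier. The exact configuration used by this test is not shown in the log, so the values below are an assumption; this is only a minimal sketch of how a 512 KB limit can arise, not the test's actual setup.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed test-style values: a 128 KB per-region flush size with the
    // default block multiplier of 4 gives the 512 KB blocking limit seen in
    // the "Over memstore limit=512.0 K" warnings. Writes to the region fail
    // with RegionTooBusyException until a flush brings the memstore back
    // under this limit.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Blocking memstore limit (bytes): " + blockingLimit); // 524288 = 512 KB
  }
}
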
2024-11-20T17:21:37,556 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/050152866c634ec68a2e794ce4d0b2c6, entries=150, sequenceid=41, filesize=11.7 K 2024-11-20T17:21:37,557 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/4d8fee83fb684961a5df56a294312b18 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/4d8fee83fb684961a5df56a294312b18 2024-11-20T17:21:37,566 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/4d8fee83fb684961a5df56a294312b18, entries=150, sequenceid=41, filesize=11.7 K 2024-11-20T17:21:37,567 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 51c47a20e94658a652843ed744178633 in 1302ms, sequenceid=41, compaction requested=false 2024-11-20T17:21:37,567 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 51c47a20e94658a652843ed744178633: 2024-11-20T17:21:37,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-20T17:21:37,685 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:37,685 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T17:21:37,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 
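
[Editor's note] The records around this point show the master-driven flush path: the master's FlushTableProcedure (pid=49) dispatches a per-region FlushRegionProcedure (pid=50) to the regionserver, which executes FlushRegionCallable; earlier attempts were reported back as failed while the region was already flushing, and the dispatcher retries here. From a client, such a table flush is normally requested through the Admin API. The sketch below is illustrative only and is not taken from the test itself; whether this particular run triggered the flush this way is an assumption.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the master to flush every region of the table; in recent HBase
      // versions this is carried out as a FlushTableProcedure that spawns
      // per-region flush procedures, like the pid=49/pid=50 pair in this log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
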
2024-11-20T17:21:37,686 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing 51c47a20e94658a652843ed744178633 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-20T17:21:37,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=A 2024-11-20T17:21:37,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:37,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=B 2024-11-20T17:21:37,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:37,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=C 2024-11-20T17:21:37,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:37,698 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T17:21:37,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120318c4212ac30474698ab1d0057a00bcc_51c47a20e94658a652843ed744178633 is 50, key is test_row_0/A:col10/1732123296290/Put/seqid=0 2024-11-20T17:21:37,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741977_1153 (size=12154) 2024-11-20T17:21:37,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,717 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120318c4212ac30474698ab1d0057a00bcc_51c47a20e94658a652843ed744178633 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120318c4212ac30474698ab1d0057a00bcc_51c47a20e94658a652843ed744178633 2024-11-20T17:21:37,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/8e9c710d43cc479eba39858ce2f0c667, store: [table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:37,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/8e9c710d43cc479eba39858ce2f0c667 is 175, key is test_row_0/A:col10/1732123296290/Put/seqid=0 2024-11-20T17:21:37,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741978_1154 (size=30955) 2024-11-20T17:21:37,751 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=51, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/8e9c710d43cc479eba39858ce2f0c667 2024-11-20T17:21:37,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/370aeb39058d4059afc23c2930502ef6 is 50, key is test_row_0/B:col10/1732123296290/Put/seqid=0 2024-11-20T17:21:37,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741979_1155 (size=12001) 2024-11-20T17:21:37,778 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/370aeb39058d4059afc23c2930502ef6 2024-11-20T17:21:37,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/d86b815d5339429fa8e8396a30644009 is 50, key is test_row_0/C:col10/1732123296290/Put/seqid=0 2024-11-20T17:21:37,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741980_1156 (size=12001) 2024-11-20T17:21:37,804 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/d86b815d5339429fa8e8396a30644009 2024-11-20T17:21:37,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/8e9c710d43cc479eba39858ce2f0c667 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/8e9c710d43cc479eba39858ce2f0c667 2024-11-20T17:21:37,824 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/8e9c710d43cc479eba39858ce2f0c667, entries=150, sequenceid=51, filesize=30.2 K 2024-11-20T17:21:37,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/370aeb39058d4059afc23c2930502ef6 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/370aeb39058d4059afc23c2930502ef6 2024-11-20T17:21:37,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,832 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/370aeb39058d4059afc23c2930502ef6, entries=150, sequenceid=51, filesize=11.7 K 2024-11-20T17:21:37,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/d86b815d5339429fa8e8396a30644009 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/d86b815d5339429fa8e8396a30644009 2024-11-20T17:21:37,837 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/d86b815d5339429fa8e8396a30644009, entries=150, sequenceid=51, filesize=11.7 K 2024-11-20T17:21:37,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,839 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=0 B/0 for 51c47a20e94658a652843ed744178633 in 153ms, sequenceid=51, compaction requested=true 2024-11-20T17:21:37,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for 51c47a20e94658a652843ed744178633: 2024-11-20T17:21:37,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:37,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50 2024-11-20T17:21:37,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=50 2024-11-20T17:21:37,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,844 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,847 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-11-20T17:21:37,847 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3120 sec 2024-11-20T17:21:37,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,849 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 
in 2.3170 sec 2024-11-20T17:21:37,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... 2024-11-20T17:21:37,876 through 2024-11-20T17:21:37,967: the DEBUG message "storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" repeats continuously across RpcServer.default.FPBQ.Fifo handlers 0, 1, and 2 on port 40121; duplicate entries collapsed ...]
2024-11-20T17:21:37,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:37,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
[duplicate DEBUG entries omitted: storefiletracker.StoreFileTrackerFactory(122) logs "instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" repeatedly on RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 (port 40121) from 2024-11-20T17:21:38,022 through 2024-11-20T17:21:38,087] 
2024-11-20T17:21:38,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[same DEBUG message repeated continuously by RpcServer.default.FPBQ.Fifo handlers 0, 1, and 2 (port 40121) between 2024-11-20T17:21:38,254 and 2024-11-20T17:21:38,326]
2024-11-20T17:21:38,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
[... identical DEBUG entries from storefiletracker.StoreFileTrackerFactory(122) repeat continuously across RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 (queue=0, port=40121), each instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker, from 2024-11-20T17:21:38,369 through 2024-11-20T17:21:38,444 ...]
2024-11-20T17:21:38,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 51c47a20e94658a652843ed744178633 2024-11-20T17:21:38,453 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 51c47a20e94658a652843ed744178633 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T17:21:38,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,455 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=A 2024-11-20T17:21:38,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:38,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=B 2024-11-20T17:21:38,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:38,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=C 2024-11-20T17:21:38,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:38,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,469 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112048531c39242d4da8a6bad57cdc870593_51c47a20e94658a652843ed744178633 is 50, key is test_row_0/A:col10/1732123298452/Put/seqid=0 2024-11-20T17:21:38,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,474 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,483 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,493 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,495 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:38,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123358488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:38,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,497 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:38,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123358490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:38,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741982_1158 (size=29238) 2024-11-20T17:21:38,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,499 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:38,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59636 deadline: 1732123358495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:38,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,500 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:38,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59586 deadline: 1732123358496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:38,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,503 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:38,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59596 deadline: 1732123358498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:38,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,512 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,521 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,598 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:38,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123358597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:38,599 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:38,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123358599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:38,601 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:38,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59636 deadline: 1732123358601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:38,620 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:38,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59586 deadline: 1732123358601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:38,625 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:38,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59596 deadline: 1732123358622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:38,781 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T17:21:38,782 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60286, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T17:21:38,800 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:38,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123358800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:38,803 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:38,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123358802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:38,826 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:38,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59586 deadline: 1732123358823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:38,827 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:38,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59636 deadline: 1732123358824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:38,827 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:38,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59596 deadline: 1732123358827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:38,900 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:38,906 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112048531c39242d4da8a6bad57cdc870593_51c47a20e94658a652843ed744178633 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112048531c39242d4da8a6bad57cdc870593_51c47a20e94658a652843ed744178633 2024-11-20T17:21:38,908 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/68a61c8e891043ca9d66c5bbcc1d3b9b, store: [table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:38,909 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/68a61c8e891043ca9d66c5bbcc1d3b9b is 175, key is test_row_0/A:col10/1732123298452/Put/seqid=0 2024-11-20T17:21:38,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741981_1157 (size=91179) 2024-11-20T17:21:39,102 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:39,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123359102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:39,106 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:39,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123359105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:39,132 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:39,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59636 deadline: 1732123359129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:39,133 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:39,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59596 deadline: 1732123359130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:39,133 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:39,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59586 deadline: 1732123359131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:39,313 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=64, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/68a61c8e891043ca9d66c5bbcc1d3b9b 2024-11-20T17:21:39,322 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/90091ff993db4ff4b8f58245ba434429 is 50, key is test_row_0/B:col10/1732123298452/Put/seqid=0 2024-11-20T17:21:39,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741983_1159 (size=12001) 2024-11-20T17:21:39,608 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:39,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123359608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:39,612 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:39,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123359610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:39,638 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:39,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59586 deadline: 1732123359634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:39,639 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:39,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59596 deadline: 1732123359635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:39,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-20T17:21:39,639 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:39,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59636 deadline: 1732123359636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:39,639 INFO [Thread-710 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed 2024-11-20T17:21:39,641 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:21:39,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees 2024-11-20T17:21:39,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T17:21:39,643 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:21:39,644 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:21:39,644 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:21:39,728 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=64 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/90091ff993db4ff4b8f58245ba434429 2024-11-20T17:21:39,736 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/ef001bd4f4a441f3b060cd1b3911daed is 50, key is test_row_0/C:col10/1732123298452/Put/seqid=0 2024-11-20T17:21:39,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741984_1160 (size=12001) 2024-11-20T17:21:39,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 
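
Note on the repeated RegionTooBusyException entries above: the writers are being rejected because region 51c47a20e94658a652843ed744178633 is over its 512.0 K blocking memstore size, and the regionserver refuses new mutations until MemStoreFlusher.0 catches up. That blocking threshold is normally hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, so the unusually small 512 K figure presumably reflects a deliberately tiny flush size chosen by this test to force the code path. The following is a minimal, illustrative client-side sketch for that situation, assuming an HBase 2.x client on the classpath; the class name, retry count, backoff values, and cell value are made up for the example, and only the table, row, family, and qualifier names are taken from the log. In practice the stock client already retries RegionTooBusyException internally (governed by hbase.client.retries.number and hbase.client.pause), and the exception may surface wrapped in a retries-exhausted error rather than directly, so treat this as a sketch of the pattern, not a required workaround.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetrySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("some value"));
          long backoffMs = 100;                        // initial pause before re-trying a rejected write
          for (int attempt = 1; attempt <= 10; attempt++) {
            try {
              table.put(put);                          // rejected with RegionTooBusyException while the region is blocked
              break;                                   // write accepted
            } catch (RegionTooBusyException busy) {    // may also arrive wrapped in a retries-exhausted IOException
              Thread.sleep(backoffMs);                 // give the flusher time to drain the memstore
              backoffMs = Math.min(backoffMs * 2, 5_000);
            }
          }
        }
      }
    }

Backing off matters here because the region only unblocks once the in-flight flush completes, which in this log takes on the order of seconds.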
2024-11-20T17:21:39,796 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:39,796 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T17:21:39,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:39,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. as already flushing 2024-11-20T17:21:39,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:39,797 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:39,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:21:39,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:39,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T17:21:39,949 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:39,949 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T17:21:39,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 
2024-11-20T17:21:39,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. as already flushing 2024-11-20T17:21:39,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:39,950 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:39,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
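
The pid=52 failures in this stretch are the flush sub-procedure being bounced: the regionserver reports "NOT flushing ... as already flushing", FlushRegionCallable turns that into an IOException, and the master logs "Remote procedure failed" and redispatches until the in-flight flush finishes. On the client side this is just the admin API waiting on the table future, as the "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed" entry above shows. A minimal sketch of issuing such a flush from application code, assuming an HBase 2.x client and using the table name from this test (the class name is made up for the example):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush every region of the table; as the HBaseAdmin$TableFuture
          // entry above suggests, the call returns once the flush procedure completes.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }

When the region is already mid-flush, the server rejects the sub-procedure exactly as logged here and the master simply retries, which is why the same pid appears several times before the flush at 17:21:40,258 finally goes ahead.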
2024-11-20T17:21:39,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:40,102 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:40,103 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T17:21:40,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:40,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 
as already flushing 2024-11-20T17:21:40,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:40,103 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:40,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:40,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:40,140 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=64 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/ef001bd4f4a441f3b060cd1b3911daed 2024-11-20T17:21:40,146 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/68a61c8e891043ca9d66c5bbcc1d3b9b as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/68a61c8e891043ca9d66c5bbcc1d3b9b 2024-11-20T17:21:40,153 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/68a61c8e891043ca9d66c5bbcc1d3b9b, entries=500, sequenceid=64, filesize=89.0 K 2024-11-20T17:21:40,155 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/90091ff993db4ff4b8f58245ba434429 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/90091ff993db4ff4b8f58245ba434429 2024-11-20T17:21:40,162 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/90091ff993db4ff4b8f58245ba434429, entries=150, sequenceid=64, filesize=11.7 K 2024-11-20T17:21:40,163 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-11-20T17:21:40,164 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/ef001bd4f4a441f3b060cd1b3911daed as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/ef001bd4f4a441f3b060cd1b3911daed 2024-11-20T17:21:40,170 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/ef001bd4f4a441f3b060cd1b3911daed, entries=150, sequenceid=64, filesize=11.7 K 2024-11-20T17:21:40,171 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 51c47a20e94658a652843ed744178633 in 1718ms, sequenceid=64, compaction requested=true 2024-11-20T17:21:40,171 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 51c47a20e94658a652843ed744178633: 2024-11-20T17:21:40,171 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51c47a20e94658a652843ed744178633:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:21:40,171 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:40,171 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51c47a20e94658a652843ed744178633:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:21:40,171 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:40,171 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51c47a20e94658a652843ed744178633:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:21:40,171 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:21:40,171 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:21:40,171 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:21:40,174 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 45660 starting at candidate #0 after considering 3 permutations with 3 in ratio 
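
With a fourth file now in each store, the compaction checker selects all eligible files for a minor compaction ("4 files of size 45660" for B here, and 184044 for A just below). The phrase "3 permutations with 3 in ratio" refers to the size-ratio test used by the exploring selection: a candidate window passes when no file in it is larger than hbase.hstore.compaction.ratio (1.2 by default) times the combined size of the others. The snippet below is a simplified, self-contained illustration of that test only; the real ExploringCompactionPolicy also weighs file counts and total size when choosing among windows, and the exact file sizes used here are approximations derived from the totals in the log, so treat this as a sketch rather than HBase's implementation.

    public class CompactionRatioSketch {
      /** True when no file is larger than ratio times the combined size of the others. */
      static boolean filesInRatio(long[] fileSizes, double ratio) {
        long total = 0;
        for (long size : fileSizes) {
          total += size;
        }
        for (long size : fileSizes) {
          if (size > ratio * (total - size)) {
            return false;
          }
        }
        return true;
      }

      public static void main(String[] args) {
        // Roughly the four B-family files selected above (total 45660 bytes).
        long[] bFamilyCandidate = {9657, 12001, 12001, 12001};
        System.out.println(filesInRatio(bFamilyCandidate, 1.2));  // prints true: the window is "in ratio"
      }
    }

A window dominated by one oversized file would fail this check, which is what keeps freshly flushed small files from being repeatedly rewritten together with a single large one.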
2024-11-20T17:21:40,174 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 184044 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:21:40,174 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 51c47a20e94658a652843ed744178633/B is initiating minor compaction (all files) 2024-11-20T17:21:40,174 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): 51c47a20e94658a652843ed744178633/A is initiating minor compaction (all files) 2024-11-20T17:21:40,174 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 51c47a20e94658a652843ed744178633/A in TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:40,174 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 51c47a20e94658a652843ed744178633/B in TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:40,174 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/50b34e4e4231415c9919344c4dccb0b4, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/050152866c634ec68a2e794ce4d0b2c6, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/370aeb39058d4059afc23c2930502ef6, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/90091ff993db4ff4b8f58245ba434429] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp, totalSize=44.6 K 2024-11-20T17:21:40,174 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/cd97f675758b403e8c8f7883f22a1ce4, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/8966c1ab1dc74484b595ad46c4105ea6, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/8e9c710d43cc479eba39858ce2f0c667, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/68a61c8e891043ca9d66c5bbcc1d3b9b] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp, totalSize=179.7 K 2024-11-20T17:21:40,174 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A 
region=TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:40,174 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. files: [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/cd97f675758b403e8c8f7883f22a1ce4, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/8966c1ab1dc74484b595ad46c4105ea6, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/8e9c710d43cc479eba39858ce2f0c667, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/68a61c8e891043ca9d66c5bbcc1d3b9b] 2024-11-20T17:21:40,175 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 50b34e4e4231415c9919344c4dccb0b4, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732123295545 2024-11-20T17:21:40,175 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting cd97f675758b403e8c8f7883f22a1ce4, keycount=100, bloomtype=ROW, size=21.8 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732123295545 2024-11-20T17:21:40,175 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 050152866c634ec68a2e794ce4d0b2c6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732123295577 2024-11-20T17:21:40,175 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8966c1ab1dc74484b595ad46c4105ea6, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732123295577 2024-11-20T17:21:40,176 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 370aeb39058d4059afc23c2930502ef6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732123296277 2024-11-20T17:21:40,176 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8e9c710d43cc479eba39858ce2f0c667, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732123296277 2024-11-20T17:21:40,176 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 90091ff993db4ff4b8f58245ba434429, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=64, earliestPutTs=1732123298452 2024-11-20T17:21:40,176 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 68a61c8e891043ca9d66c5bbcc1d3b9b, keycount=500, bloomtype=ROW, size=89.0 K, encoding=NONE, compression=NONE, seqNum=64, earliestPutTs=1732123298441 2024-11-20T17:21:40,188 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51c47a20e94658a652843ed744178633#B#compaction#139 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:40,189 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/f2a3f9f2a8e948599628cc5478fe2d56 is 50, key is test_row_0/B:col10/1732123298452/Put/seqid=0 2024-11-20T17:21:40,197 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:40,205 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120011611bb8c3348d6b524ff8311a93d1e_51c47a20e94658a652843ed744178633 store=[table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:40,211 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120011611bb8c3348d6b524ff8311a93d1e_51c47a20e94658a652843ed744178633, store=[table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:40,211 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120011611bb8c3348d6b524ff8311a93d1e_51c47a20e94658a652843ed744178633 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:40,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741985_1161 (size=12139) 2024-11-20T17:21:40,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741986_1162 (size=4469) 2024-11-20T17:21:40,242 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51c47a20e94658a652843ed744178633#A#compaction#140 average throughput is 0.54 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:40,244 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/afe6ed6da53f4dc4ae8e4303a0930a77 is 175, key is test_row_0/A:col10/1732123298452/Put/seqid=0 2024-11-20T17:21:40,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T17:21:40,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741987_1163 (size=31093) 2024-11-20T17:21:40,256 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:40,257 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T17:21:40,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:40,258 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing 51c47a20e94658a652843ed744178633 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T17:21:40,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=A 2024-11-20T17:21:40,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:40,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=B 2024-11-20T17:21:40,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:40,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=C 2024-11-20T17:21:40,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:40,265 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/afe6ed6da53f4dc4ae8e4303a0930a77 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/afe6ed6da53f4dc4ae8e4303a0930a77 2024-11-20T17:21:40,272 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] 
regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 51c47a20e94658a652843ed744178633/A of 51c47a20e94658a652843ed744178633 into afe6ed6da53f4dc4ae8e4303a0930a77(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:21:40,272 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 51c47a20e94658a652843ed744178633: 2024-11-20T17:21:40,272 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633., storeName=51c47a20e94658a652843ed744178633/A, priority=12, startTime=1732123300171; duration=0sec 2024-11-20T17:21:40,272 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:21:40,272 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51c47a20e94658a652843ed744178633:A 2024-11-20T17:21:40,272 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:21:40,275 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 45660 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:21:40,277 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): 51c47a20e94658a652843ed744178633/C is initiating minor compaction (all files) 2024-11-20T17:21:40,277 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 51c47a20e94658a652843ed744178633/C in TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 
2024-11-20T17:21:40,277 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/47105e9ee0e74eb8bde444d62e66f67a, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/4d8fee83fb684961a5df56a294312b18, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/d86b815d5339429fa8e8396a30644009, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/ef001bd4f4a441f3b060cd1b3911daed] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp, totalSize=44.6 K 2024-11-20T17:21:40,278 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 47105e9ee0e74eb8bde444d62e66f67a, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732123295545 2024-11-20T17:21:40,279 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4d8fee83fb684961a5df56a294312b18, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732123295577 2024-11-20T17:21:40,279 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting d86b815d5339429fa8e8396a30644009, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732123296277 2024-11-20T17:21:40,279 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting ef001bd4f4a441f3b060cd1b3911daed, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=64, earliestPutTs=1732123298452 2024-11-20T17:21:40,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120baead337095b46f2bba998f858e5d43f_51c47a20e94658a652843ed744178633 is 50, key is test_row_0/A:col10/1732123298486/Put/seqid=0 2024-11-20T17:21:40,299 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51c47a20e94658a652843ed744178633#C#compaction#142 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:40,300 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/11e8ac8373054dde8fc5ef1401594a2a is 50, key is test_row_0/C:col10/1732123298452/Put/seqid=0 2024-11-20T17:21:40,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741988_1164 (size=12154) 2024-11-20T17:21:40,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:40,316 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120baead337095b46f2bba998f858e5d43f_51c47a20e94658a652843ed744178633 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120baead337095b46f2bba998f858e5d43f_51c47a20e94658a652843ed744178633 2024-11-20T17:21:40,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/bfcbd87c43b9440ba768eccded6c1e0c, store: [table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:40,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/bfcbd87c43b9440ba768eccded6c1e0c is 175, key is test_row_0/A:col10/1732123298486/Put/seqid=0 2024-11-20T17:21:40,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741990_1166 (size=30955) 2024-11-20T17:21:40,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741989_1165 (size=12139) 2024-11-20T17:21:40,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 51c47a20e94658a652843ed744178633 2024-11-20T17:21:40,622 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 
as already flushing 2024-11-20T17:21:40,625 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/f2a3f9f2a8e948599628cc5478fe2d56 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/f2a3f9f2a8e948599628cc5478fe2d56 2024-11-20T17:21:40,632 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 51c47a20e94658a652843ed744178633/B of 51c47a20e94658a652843ed744178633 into f2a3f9f2a8e948599628cc5478fe2d56(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:21:40,633 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 51c47a20e94658a652843ed744178633: 2024-11-20T17:21:40,633 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633., storeName=51c47a20e94658a652843ed744178633/B, priority=12, startTime=1732123300171; duration=0sec 2024-11-20T17:21:40,633 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:40,633 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51c47a20e94658a652843ed744178633:B 2024-11-20T17:21:40,638 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:40,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123360636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:40,639 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:40,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123360637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:40,644 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:40,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59636 deadline: 1732123360643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:40,645 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:40,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59586 deadline: 1732123360643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:40,649 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:40,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59596 deadline: 1732123360649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:40,739 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:40,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123360739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:40,743 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:40,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123360740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:40,744 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=88, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/bfcbd87c43b9440ba768eccded6c1e0c 2024-11-20T17:21:40,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T17:21:40,758 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/11e8ac8373054dde8fc5ef1401594a2a as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/11e8ac8373054dde8fc5ef1401594a2a 2024-11-20T17:21:40,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/d88cf25c8c3a42129e5a8180d1666a20 is 50, key is test_row_0/B:col10/1732123298486/Put/seqid=0 2024-11-20T17:21:40,764 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 51c47a20e94658a652843ed744178633/C of 51c47a20e94658a652843ed744178633 into 11e8ac8373054dde8fc5ef1401594a2a(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:21:40,764 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 51c47a20e94658a652843ed744178633: 2024-11-20T17:21:40,764 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633., storeName=51c47a20e94658a652843ed744178633/C, priority=12, startTime=1732123300171; duration=0sec 2024-11-20T17:21:40,764 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:40,764 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51c47a20e94658a652843ed744178633:C 2024-11-20T17:21:40,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741991_1167 (size=12001) 2024-11-20T17:21:40,942 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:40,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123360942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:40,946 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:40,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123360945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:41,168 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/d88cf25c8c3a42129e5a8180d1666a20 2024-11-20T17:21:41,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/65a5c740c060406799b2d0422b05f2ee is 50, key is test_row_0/C:col10/1732123298486/Put/seqid=0 2024-11-20T17:21:41,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741992_1168 (size=12001) 2024-11-20T17:21:41,184 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/65a5c740c060406799b2d0422b05f2ee 2024-11-20T17:21:41,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/bfcbd87c43b9440ba768eccded6c1e0c as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/bfcbd87c43b9440ba768eccded6c1e0c 2024-11-20T17:21:41,196 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/bfcbd87c43b9440ba768eccded6c1e0c, entries=150, sequenceid=88, filesize=30.2 K 2024-11-20T17:21:41,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/d88cf25c8c3a42129e5a8180d1666a20 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/d88cf25c8c3a42129e5a8180d1666a20 2024-11-20T17:21:41,203 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/d88cf25c8c3a42129e5a8180d1666a20, entries=150, sequenceid=88, filesize=11.7 K 2024-11-20T17:21:41,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/65a5c740c060406799b2d0422b05f2ee as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/65a5c740c060406799b2d0422b05f2ee 2024-11-20T17:21:41,210 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/65a5c740c060406799b2d0422b05f2ee, entries=150, sequenceid=88, filesize=11.7 K 2024-11-20T17:21:41,211 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 51c47a20e94658a652843ed744178633 in 954ms, sequenceid=88, compaction requested=false 2024-11-20T17:21:41,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for 51c47a20e94658a652843ed744178633: 2024-11-20T17:21:41,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 
2024-11-20T17:21:41,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-11-20T17:21:41,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=52 2024-11-20T17:21:41,214 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-11-20T17:21:41,214 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5690 sec 2024-11-20T17:21:41,216 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 1.5740 sec 2024-11-20T17:21:41,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 51c47a20e94658a652843ed744178633 2024-11-20T17:21:41,247 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 51c47a20e94658a652843ed744178633 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T17:21:41,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=A 2024-11-20T17:21:41,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:41,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=B 2024-11-20T17:21:41,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:41,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=C 2024-11-20T17:21:41,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:41,256 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120889a03966da64c6daffc415e66aab84b_51c47a20e94658a652843ed744178633 is 50, key is test_row_0/A:col10/1732123300634/Put/seqid=0 2024-11-20T17:21:41,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741993_1169 (size=14594) 2024-11-20T17:21:41,286 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:41,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123361283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:41,288 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:41,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123361285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:41,389 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:41,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123361388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:41,391 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:41,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123361391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:41,592 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:41,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123361591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:41,596 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:41,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123361594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:41,662 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:41,666 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120889a03966da64c6daffc415e66aab84b_51c47a20e94658a652843ed744178633 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120889a03966da64c6daffc415e66aab84b_51c47a20e94658a652843ed744178633 2024-11-20T17:21:41,667 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/991fb005a2814d9bbf47ab0b39be3540, store: [table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:41,668 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/991fb005a2814d9bbf47ab0b39be3540 is 175, key is test_row_0/A:col10/1732123300634/Put/seqid=0 2024-11-20T17:21:41,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741994_1170 (size=39549) 2024-11-20T17:21:41,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T17:21:41,748 INFO [Thread-710 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-11-20T17:21:41,749 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:21:41,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-11-20T17:21:41,751 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=53, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:21:41,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T17:21:41,752 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:21:41,752 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:21:41,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T17:21:41,897 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:41,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123361896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:41,898 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:41,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123361897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:41,903 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:41,904 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-20T17:21:41,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:41,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. as already flushing 2024-11-20T17:21:41,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:41,904 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:41,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:41,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:42,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T17:21:42,057 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:42,057 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-20T17:21:42,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:42,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. as already flushing 2024-11-20T17:21:42,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:42,058 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:21:42,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:42,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:42,074 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=104, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/991fb005a2814d9bbf47ab0b39be3540 2024-11-20T17:21:42,084 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/a6f0940a06f142ee9f2878e516811b14 is 50, key is test_row_0/B:col10/1732123300634/Put/seqid=0 2024-11-20T17:21:42,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741995_1171 (size=12001) 2024-11-20T17:21:42,210 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:42,210 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-20T17:21:42,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:42,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. as already flushing 2024-11-20T17:21:42,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:42,211 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:21:42,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:42,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:42,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T17:21:42,362 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:42,363 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-20T17:21:42,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:42,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. as already flushing 2024-11-20T17:21:42,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:42,363 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:42,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:42,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:42,403 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:42,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123362399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:42,404 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:42,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123362402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:42,489 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/a6f0940a06f142ee9f2878e516811b14 2024-11-20T17:21:42,498 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/40935847f77e4c59977cf38abb42eec6 is 50, key is test_row_0/C:col10/1732123300634/Put/seqid=0 2024-11-20T17:21:42,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741996_1172 (size=12001) 2024-11-20T17:21:42,515 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:42,516 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-20T17:21:42,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:42,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. as already flushing 2024-11-20T17:21:42,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:42,518 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:42,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:42,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:42,649 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:42,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59636 deadline: 1732123362648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:42,650 DEBUG [Thread-702 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4156 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633., hostname=d514dc944523,40121,1732123262111, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:21:42,661 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:42,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59586 deadline: 1732123362660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:42,662 DEBUG [Thread-704 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4165 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633., hostname=d514dc944523,40121,1732123262111, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:21:42,666 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:42,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59596 deadline: 1732123362666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:42,667 DEBUG [Thread-706 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4170 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633., hostname=d514dc944523,40121,1732123262111, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:21:42,670 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:42,671 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-20T17:21:42,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:42,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. as already flushing 2024-11-20T17:21:42,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:42,671 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:42,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:42,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:42,823 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:42,824 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-20T17:21:42,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 
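Note: the repeated RegionTooBusyException entries above are raised by HRegion.checkResources once a region's memstore exceeds its blocking size. In a stock deployment that threshold is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the "Over memstore limit=512.0 K" figure in this run suggests the test shrinks the flush size so the blocking path is hit quickly. The following is a minimal sketch of such a configuration; the concrete values are illustrative assumptions, not the settings this test actually uses.

```java
// Sketch only: shrink the memstore thresholds so writers hit the blocking
// limit (and therefore RegionTooBusyException) almost immediately.
// The property keys are standard HBase keys; the values are assumptions.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class SmallMemstoreConf {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore at 128 KB instead of the 128 MB default.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Block new writes once the memstore reaches 4x the flush size,
    // i.e. the "Over memstore limit=512.0 K" reported in the log above.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }

  private SmallMemstoreConf() {}
}
```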
2024-11-20T17:21:42,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. as already flushing 2024-11-20T17:21:42,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:42,824 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:42,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:21:42,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
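Note: on the client side, the "Call exception, tries=6, retries=16" entries show RpcRetryingCallerImpl absorbing each RegionTooBusyException and retrying the put with backoff until the region unblocks or the retry budget is exhausted. Below is a hedged sketch of a writer with an explicit retry budget; the retry count mirrors the "retries=16" in the log, the pause value is an assumption, and the table, row, and column names are taken from the log for illustration only.

```java
// Sketch only: a writer whose retry budget mirrors the "retries=16" seen in
// the log. RpcRetryingCallerImpl retries RegionTooBusyException internally,
// so Table.put() only throws after the budget is exhausted.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class RetryingWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 16); // retry budget per operation
    conf.setLong("hbase.client.pause", 100);        // base backoff in ms (assumed value)
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_1"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      table.put(put); // a blocked region surfaces here only after all retries fail
    }
  }
}
```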
2024-11-20T17:21:42,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T17:21:42,906 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/40935847f77e4c59977cf38abb42eec6 2024-11-20T17:21:42,912 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/991fb005a2814d9bbf47ab0b39be3540 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/991fb005a2814d9bbf47ab0b39be3540 2024-11-20T17:21:42,916 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/991fb005a2814d9bbf47ab0b39be3540, entries=200, sequenceid=104, filesize=38.6 K 2024-11-20T17:21:42,918 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/a6f0940a06f142ee9f2878e516811b14 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/a6f0940a06f142ee9f2878e516811b14 2024-11-20T17:21:42,925 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/a6f0940a06f142ee9f2878e516811b14, entries=150, sequenceid=104, filesize=11.7 K 2024-11-20T17:21:42,926 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/40935847f77e4c59977cf38abb42eec6 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/40935847f77e4c59977cf38abb42eec6 2024-11-20T17:21:42,931 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/40935847f77e4c59977cf38abb42eec6, entries=150, sequenceid=104, filesize=11.7 K 2024-11-20T17:21:42,932 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 51c47a20e94658a652843ed744178633 in 1685ms, sequenceid=104, compaction requested=true 2024-11-20T17:21:42,932 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 51c47a20e94658a652843ed744178633: 2024-11-20T17:21:42,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51c47a20e94658a652843ed744178633:A, priority=-2147483648, current under compaction store 
size is 1 2024-11-20T17:21:42,932 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:21:42,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:42,932 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:21:42,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51c47a20e94658a652843ed744178633:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:21:42,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:42,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51c47a20e94658a652843ed744178633:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:21:42,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:21:42,933 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101597 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:21:42,934 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): 51c47a20e94658a652843ed744178633/A is initiating minor compaction (all files) 2024-11-20T17:21:42,934 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 51c47a20e94658a652843ed744178633/A in TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:42,934 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/afe6ed6da53f4dc4ae8e4303a0930a77, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/bfcbd87c43b9440ba768eccded6c1e0c, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/991fb005a2814d9bbf47ab0b39be3540] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp, totalSize=99.2 K 2024-11-20T17:21:42,934 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 
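Note: the surrounding compaction entries show ExploringCompactionPolicy selecting all three flushed files per store for a minor compaction ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking"). Those numbers track the usual store-file tuning knobs; the sketch below lists the keys involved, with illustrative values that are assumptions rather than this test's actual settings.

```java
// Sketch only: store-file thresholds that drive the compaction decisions
// logged here. Keys are standard HBase properties; values are assumptions.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class CompactionTuning {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // Minimum files before a minor compaction is considered ("3 eligible").
    conf.setInt("hbase.hstore.compaction.min", 3);
    // Upper bound on files selected for a single compaction.
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Writes are delayed once a store accumulates this many files
    // (the "16 blocking" figure in the selection log).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    return conf;
  }

  private CompactionTuning() {}
}
```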
2024-11-20T17:21:42,934 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. files: [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/afe6ed6da53f4dc4ae8e4303a0930a77, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/bfcbd87c43b9440ba768eccded6c1e0c, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/991fb005a2814d9bbf47ab0b39be3540] 2024-11-20T17:21:42,935 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting afe6ed6da53f4dc4ae8e4303a0930a77, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=64, earliestPutTs=1732123298452 2024-11-20T17:21:42,935 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:21:42,935 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 51c47a20e94658a652843ed744178633/B is initiating minor compaction (all files) 2024-11-20T17:21:42,935 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 51c47a20e94658a652843ed744178633/B in TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:42,935 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/f2a3f9f2a8e948599628cc5478fe2d56, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/d88cf25c8c3a42129e5a8180d1666a20, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/a6f0940a06f142ee9f2878e516811b14] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp, totalSize=35.3 K 2024-11-20T17:21:42,936 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting bfcbd87c43b9440ba768eccded6c1e0c, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1732123298486 2024-11-20T17:21:42,936 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting f2a3f9f2a8e948599628cc5478fe2d56, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=64, earliestPutTs=1732123298452 2024-11-20T17:21:42,937 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting d88cf25c8c3a42129e5a8180d1666a20, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1732123298486 2024-11-20T17:21:42,937 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 991fb005a2814d9bbf47ab0b39be3540, keycount=200, 
bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=104, earliestPutTs=1732123300622 2024-11-20T17:21:42,937 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting a6f0940a06f142ee9f2878e516811b14, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=104, earliestPutTs=1732123300634 2024-11-20T17:21:42,945 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:42,946 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51c47a20e94658a652843ed744178633#B#compaction#148 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:42,947 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/46a59a32ab6b4323be02bf151e7bc879 is 50, key is test_row_0/B:col10/1732123300634/Put/seqid=0 2024-11-20T17:21:42,951 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120a71b6cc2c5394821abbead89aa86b62d_51c47a20e94658a652843ed744178633 store=[table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:42,953 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120a71b6cc2c5394821abbead89aa86b62d_51c47a20e94658a652843ed744178633, store=[table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:42,953 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a71b6cc2c5394821abbead89aa86b62d_51c47a20e94658a652843ed744178633 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:42,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741997_1173 (size=12241) 2024-11-20T17:21:42,965 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/46a59a32ab6b4323be02bf151e7bc879 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/46a59a32ab6b4323be02bf151e7bc879 2024-11-20T17:21:42,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741998_1174 (size=4469) 2024-11-20T17:21:42,973 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 51c47a20e94658a652843ed744178633/B of 
51c47a20e94658a652843ed744178633 into 46a59a32ab6b4323be02bf151e7bc879(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:21:42,973 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 51c47a20e94658a652843ed744178633: 2024-11-20T17:21:42,973 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633., storeName=51c47a20e94658a652843ed744178633/B, priority=13, startTime=1732123302932; duration=0sec 2024-11-20T17:21:42,973 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:21:42,973 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51c47a20e94658a652843ed744178633:B 2024-11-20T17:21:42,973 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:21:42,974 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:21:42,974 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 51c47a20e94658a652843ed744178633/C is initiating minor compaction (all files) 2024-11-20T17:21:42,975 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 51c47a20e94658a652843ed744178633/C in TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:42,975 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/11e8ac8373054dde8fc5ef1401594a2a, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/65a5c740c060406799b2d0422b05f2ee, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/40935847f77e4c59977cf38abb42eec6] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp, totalSize=35.3 K 2024-11-20T17:21:42,975 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 11e8ac8373054dde8fc5ef1401594a2a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=64, earliestPutTs=1732123298452 2024-11-20T17:21:42,975 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51c47a20e94658a652843ed744178633#A#compaction#149 average throughput is 0.81 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:42,976 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 65a5c740c060406799b2d0422b05f2ee, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1732123298486 2024-11-20T17:21:42,976 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 40935847f77e4c59977cf38abb42eec6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=104, earliestPutTs=1732123300634 2024-11-20T17:21:42,976 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/f916b89cb02a429fb2571d46d59f431f is 175, key is test_row_0/A:col10/1732123300634/Put/seqid=0 2024-11-20T17:21:42,977 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:42,977 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-20T17:21:42,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:42,978 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing 51c47a20e94658a652843ed744178633 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T17:21:42,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=A 2024-11-20T17:21:42,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:42,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=B 2024-11-20T17:21:42,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:42,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=C 2024-11-20T17:21:42,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:42,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741999_1175 (size=31195) 2024-11-20T17:21:42,992 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/f916b89cb02a429fb2571d46d59f431f as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/f916b89cb02a429fb2571d46d59f431f 2024-11-20T17:21:42,994 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51c47a20e94658a652843ed744178633#C#compaction#150 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:42,994 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/f4907b5aab06448a85730b76062da06b is 50, key is test_row_0/C:col10/1732123300634/Put/seqid=0 2024-11-20T17:21:42,999 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 51c47a20e94658a652843ed744178633/A of 51c47a20e94658a652843ed744178633 into f916b89cb02a429fb2571d46d59f431f(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:21:42,999 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 51c47a20e94658a652843ed744178633: 2024-11-20T17:21:42,999 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633., storeName=51c47a20e94658a652843ed744178633/A, priority=13, startTime=1732123302932; duration=0sec 2024-11-20T17:21:42,999 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:42,999 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51c47a20e94658a652843ed744178633:A 2024-11-20T17:21:43,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120bc368e321f364ff4a0f6e26e09a4e390_51c47a20e94658a652843ed744178633 is 50, key is test_row_0/A:col10/1732123301282/Put/seqid=0 2024-11-20T17:21:43,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742000_1176 (size=12241) 2024-11-20T17:21:43,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742001_1177 (size=12154) 2024-11-20T17:21:43,007 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/f4907b5aab06448a85730b76062da06b as 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/f4907b5aab06448a85730b76062da06b 2024-11-20T17:21:43,015 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 51c47a20e94658a652843ed744178633/C of 51c47a20e94658a652843ed744178633 into f4907b5aab06448a85730b76062da06b(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:21:43,015 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 51c47a20e94658a652843ed744178633: 2024-11-20T17:21:43,015 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633., storeName=51c47a20e94658a652843ed744178633/C, priority=13, startTime=1732123302932; duration=0sec 2024-11-20T17:21:43,015 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:43,015 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51c47a20e94658a652843ed744178633:C 2024-11-20T17:21:43,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:43,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 51c47a20e94658a652843ed744178633 2024-11-20T17:21:43,412 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 
as already flushing 2024-11-20T17:21:43,413 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120bc368e321f364ff4a0f6e26e09a4e390_51c47a20e94658a652843ed744178633 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120bc368e321f364ff4a0f6e26e09a4e390_51c47a20e94658a652843ed744178633 2024-11-20T17:21:43,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/4a74b7432e5b4e33841b20a6b2760033, store: [table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:43,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/4a74b7432e5b4e33841b20a6b2760033 is 175, key is test_row_0/A:col10/1732123301282/Put/seqid=0 2024-11-20T17:21:43,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742002_1178 (size=30955) 2024-11-20T17:21:43,421 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=128, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/4a74b7432e5b4e33841b20a6b2760033 2024-11-20T17:21:43,424 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:43,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123363424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:43,424 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:43,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123363424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:43,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/3d32826dbb47443ab558808412343d84 is 50, key is test_row_0/B:col10/1732123301282/Put/seqid=0 2024-11-20T17:21:43,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742003_1179 (size=12001) 2024-11-20T17:21:43,446 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/3d32826dbb47443ab558808412343d84 2024-11-20T17:21:43,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/26afdb17abe24757a3b4acf8a392d978 is 50, key is test_row_0/C:col10/1732123301282/Put/seqid=0 2024-11-20T17:21:43,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742004_1180 (size=12001) 2024-11-20T17:21:43,527 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:43,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123363525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:43,527 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:43,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123363525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:43,729 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:43,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123363728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:43,730 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:43,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123363729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:43,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T17:21:43,860 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/26afdb17abe24757a3b4acf8a392d978 2024-11-20T17:21:43,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/4a74b7432e5b4e33841b20a6b2760033 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/4a74b7432e5b4e33841b20a6b2760033 2024-11-20T17:21:43,871 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/4a74b7432e5b4e33841b20a6b2760033, entries=150, sequenceid=128, filesize=30.2 K 2024-11-20T17:21:43,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/3d32826dbb47443ab558808412343d84 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/3d32826dbb47443ab558808412343d84 2024-11-20T17:21:43,877 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/3d32826dbb47443ab558808412343d84, entries=150, sequenceid=128, filesize=11.7 K 2024-11-20T17:21:43,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/26afdb17abe24757a3b4acf8a392d978 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/26afdb17abe24757a3b4acf8a392d978 2024-11-20T17:21:43,883 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/26afdb17abe24757a3b4acf8a392d978, entries=150, sequenceid=128, filesize=11.7 K 2024-11-20T17:21:43,884 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 51c47a20e94658a652843ed744178633 in 906ms, sequenceid=128, compaction requested=false 2024-11-20T17:21:43,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for 51c47a20e94658a652843ed744178633: 2024-11-20T17:21:43,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:43,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-11-20T17:21:43,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-11-20T17:21:43,887 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-11-20T17:21:43,887 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1340 sec 2024-11-20T17:21:43,889 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 2.1390 sec 2024-11-20T17:21:44,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 51c47a20e94658a652843ed744178633 2024-11-20T17:21:44,032 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 51c47a20e94658a652843ed744178633 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T17:21:44,033 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=A 2024-11-20T17:21:44,033 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:44,033 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=B 2024-11-20T17:21:44,033 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:44,033 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=C 2024-11-20T17:21:44,033 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:44,045 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120818c5b6b2b764846b0340b2d1fd8d703_51c47a20e94658a652843ed744178633 is 50, key is test_row_0/A:col10/1732123303421/Put/seqid=0 2024-11-20T17:21:44,064 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:44,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123364062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:44,066 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:44,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123364063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:44,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742005_1181 (size=12304) 2024-11-20T17:21:44,167 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:44,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123364165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:44,170 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:44,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123364167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:44,370 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:44,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123364369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:44,372 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:44,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123364371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:44,469 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:44,473 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120818c5b6b2b764846b0340b2d1fd8d703_51c47a20e94658a652843ed744178633 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120818c5b6b2b764846b0340b2d1fd8d703_51c47a20e94658a652843ed744178633 2024-11-20T17:21:44,474 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/726720490825493e8282eea8623bcd22, store: [table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:44,475 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/726720490825493e8282eea8623bcd22 is 175, key is test_row_0/A:col10/1732123303421/Put/seqid=0 2024-11-20T17:21:44,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742006_1182 (size=31105) 2024-11-20T17:21:44,672 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:44,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123364671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:44,674 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:44,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123364673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:44,883 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=144, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/726720490825493e8282eea8623bcd22 2024-11-20T17:21:44,892 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/c728f80159bc4bceba8a1ede3af75685 is 50, key is test_row_0/B:col10/1732123303421/Put/seqid=0 2024-11-20T17:21:44,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742007_1183 (size=12151) 2024-11-20T17:21:45,177 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:45,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123365177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:45,178 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:45,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123365177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:45,297 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=144 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/c728f80159bc4bceba8a1ede3af75685 2024-11-20T17:21:45,306 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/32ce100e3d6849bf85fc79dbcceed496 is 50, key is test_row_0/C:col10/1732123303421/Put/seqid=0 2024-11-20T17:21:45,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742008_1184 (size=12151) 2024-11-20T17:21:45,711 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=144 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/32ce100e3d6849bf85fc79dbcceed496 2024-11-20T17:21:45,717 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/726720490825493e8282eea8623bcd22 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/726720490825493e8282eea8623bcd22 2024-11-20T17:21:45,721 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/726720490825493e8282eea8623bcd22, entries=150, sequenceid=144, filesize=30.4 K 2024-11-20T17:21:45,722 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/c728f80159bc4bceba8a1ede3af75685 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/c728f80159bc4bceba8a1ede3af75685 2024-11-20T17:21:45,726 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/c728f80159bc4bceba8a1ede3af75685, entries=150, sequenceid=144, filesize=11.9 K 2024-11-20T17:21:45,727 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/32ce100e3d6849bf85fc79dbcceed496 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/32ce100e3d6849bf85fc79dbcceed496 2024-11-20T17:21:45,732 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/32ce100e3d6849bf85fc79dbcceed496, entries=150, sequenceid=144, filesize=11.9 K 2024-11-20T17:21:45,733 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 51c47a20e94658a652843ed744178633 in 1701ms, sequenceid=144, compaction requested=true 2024-11-20T17:21:45,733 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 51c47a20e94658a652843ed744178633: 2024-11-20T17:21:45,733 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51c47a20e94658a652843ed744178633:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:21:45,733 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:45,733 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51c47a20e94658a652843ed744178633:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:21:45,733 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:45,733 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51c47a20e94658a652843ed744178633:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:21:45,733 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:21:45,733 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:21:45,733 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:21:45,735 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:21:45,735 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93255 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:21:45,735 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 51c47a20e94658a652843ed744178633/B is initiating minor compaction (all files) 2024-11-20T17:21:45,735 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): 51c47a20e94658a652843ed744178633/A is initiating minor compaction (all files) 2024-11-20T17:21:45,735 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 51c47a20e94658a652843ed744178633/B in TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:45,735 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 51c47a20e94658a652843ed744178633/A in TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:45,735 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/46a59a32ab6b4323be02bf151e7bc879, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/3d32826dbb47443ab558808412343d84, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/c728f80159bc4bceba8a1ede3af75685] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp, totalSize=35.5 K 2024-11-20T17:21:45,735 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/f916b89cb02a429fb2571d46d59f431f, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/4a74b7432e5b4e33841b20a6b2760033, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/726720490825493e8282eea8623bcd22] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp, totalSize=91.1 K 2024-11-20T17:21:45,735 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:45,735 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 
files: [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/f916b89cb02a429fb2571d46d59f431f, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/4a74b7432e5b4e33841b20a6b2760033, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/726720490825493e8282eea8623bcd22] 2024-11-20T17:21:45,735 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 46a59a32ab6b4323be02bf151e7bc879, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=104, earliestPutTs=1732123300634 2024-11-20T17:21:45,735 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting f916b89cb02a429fb2571d46d59f431f, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=104, earliestPutTs=1732123300634 2024-11-20T17:21:45,736 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 3d32826dbb47443ab558808412343d84, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1732123301276 2024-11-20T17:21:45,736 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4a74b7432e5b4e33841b20a6b2760033, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1732123301276 2024-11-20T17:21:45,736 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting c728f80159bc4bceba8a1ede3af75685, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=144, earliestPutTs=1732123303416 2024-11-20T17:21:45,736 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 726720490825493e8282eea8623bcd22, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=144, earliestPutTs=1732123303416 2024-11-20T17:21:45,743 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:45,744 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51c47a20e94658a652843ed744178633#B#compaction#157 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:45,745 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/e96e7fb470034b85ae39c2562dc4df1d is 50, key is test_row_0/B:col10/1732123303421/Put/seqid=0 2024-11-20T17:21:45,746 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120093b12d65ee64a76ba5af9bdeb11b0ce_51c47a20e94658a652843ed744178633 store=[table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:45,748 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120093b12d65ee64a76ba5af9bdeb11b0ce_51c47a20e94658a652843ed744178633, store=[table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:45,748 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120093b12d65ee64a76ba5af9bdeb11b0ce_51c47a20e94658a652843ed744178633 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:45,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742009_1185 (size=12493) 2024-11-20T17:21:45,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742010_1186 (size=4469) 2024-11-20T17:21:45,772 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/e96e7fb470034b85ae39c2562dc4df1d as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/e96e7fb470034b85ae39c2562dc4df1d 2024-11-20T17:21:45,778 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 51c47a20e94658a652843ed744178633/B of 51c47a20e94658a652843ed744178633 into e96e7fb470034b85ae39c2562dc4df1d(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
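The minor compactions above are selected automatically by the region server once three flushed files accumulate per store; the same operation can also be requested from a client through the public Admin API. The following is a minimal illustrative sketch of doing so, assuming a default client Configuration and reusing the table name from this test; it is not part of the test run logged here.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestCompaction {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.compact(table);                              // queue a minor compaction for each region
          CompactionState state = admin.getCompactionState(table);
          System.out.println("Compaction state: " + state);  // NONE, MINOR, MAJOR, or MAJOR_AND_MINOR
        }
      }
    }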
2024-11-20T17:21:45,778 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 51c47a20e94658a652843ed744178633: 2024-11-20T17:21:45,778 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633., storeName=51c47a20e94658a652843ed744178633/B, priority=13, startTime=1732123305733; duration=0sec 2024-11-20T17:21:45,778 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:21:45,778 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51c47a20e94658a652843ed744178633:B 2024-11-20T17:21:45,779 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:21:45,780 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:21:45,780 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 51c47a20e94658a652843ed744178633/C is initiating minor compaction (all files) 2024-11-20T17:21:45,780 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 51c47a20e94658a652843ed744178633/C in TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:45,780 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/f4907b5aab06448a85730b76062da06b, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/26afdb17abe24757a3b4acf8a392d978, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/32ce100e3d6849bf85fc79dbcceed496] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp, totalSize=35.5 K 2024-11-20T17:21:45,781 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting f4907b5aab06448a85730b76062da06b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=104, earliestPutTs=1732123300634 2024-11-20T17:21:45,781 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 26afdb17abe24757a3b4acf8a392d978, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1732123301276 2024-11-20T17:21:45,781 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 32ce100e3d6849bf85fc79dbcceed496, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=144, earliestPutTs=1732123303416 2024-11-20T17:21:45,790 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
51c47a20e94658a652843ed744178633#C#compaction#159 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:45,791 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/76a612499cee44ddb8d41bb0a36b7b81 is 50, key is test_row_0/C:col10/1732123303421/Put/seqid=0 2024-11-20T17:21:45,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742011_1187 (size=12493) 2024-11-20T17:21:45,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T17:21:45,859 INFO [Thread-710 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-11-20T17:21:45,860 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:21:45,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees 2024-11-20T17:21:45,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T17:21:45,862 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:21:45,862 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:21:45,863 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:21:45,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T17:21:46,014 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:46,015 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T17:21:46,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 
2024-11-20T17:21:46,015 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing 51c47a20e94658a652843ed744178633 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T17:21:46,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=A 2024-11-20T17:21:46,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:46,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=B 2024-11-20T17:21:46,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:46,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=C 2024-11-20T17:21:46,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:46,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c5dd06a340fe4859aa55268c372dcaf3_51c47a20e94658a652843ed744178633 is 50, key is test_row_0/A:col10/1732123304062/Put/seqid=0 2024-11-20T17:21:46,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742012_1188 (size=12304) 2024-11-20T17:21:46,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T17:21:46,169 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51c47a20e94658a652843ed744178633#A#compaction#158 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:46,170 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/13d4ec428a36456095985fc660f76590 is 175, key is test_row_0/A:col10/1732123303421/Put/seqid=0 2024-11-20T17:21:46,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742013_1189 (size=31447) 2024-11-20T17:21:46,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 51c47a20e94658a652843ed744178633 2024-11-20T17:21:46,181 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 
as already flushing 2024-11-20T17:21:46,201 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/76a612499cee44ddb8d41bb0a36b7b81 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/76a612499cee44ddb8d41bb0a36b7b81 2024-11-20T17:21:46,201 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:46,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123366198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:46,203 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:46,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123366202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:46,206 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 51c47a20e94658a652843ed744178633/C of 51c47a20e94658a652843ed744178633 into 76a612499cee44ddb8d41bb0a36b7b81(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:21:46,206 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 51c47a20e94658a652843ed744178633: 2024-11-20T17:21:46,206 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633., storeName=51c47a20e94658a652843ed744178633/C, priority=13, startTime=1732123305733; duration=0sec 2024-11-20T17:21:46,206 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:46,206 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51c47a20e94658a652843ed744178633:C 2024-11-20T17:21:46,304 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:46,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123366303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:46,304 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:46,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123366304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:46,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:46,436 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c5dd06a340fe4859aa55268c372dcaf3_51c47a20e94658a652843ed744178633 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c5dd06a340fe4859aa55268c372dcaf3_51c47a20e94658a652843ed744178633 2024-11-20T17:21:46,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/a953148e34b244ce902866430e02c6d1, store: [table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:46,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/a953148e34b244ce902866430e02c6d1 is 175, key is test_row_0/A:col10/1732123304062/Put/seqid=0 2024-11-20T17:21:46,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742014_1190 (size=31105) 2024-11-20T17:21:46,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T17:21:46,523 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:46,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123366520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:46,524 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:46,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123366521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:46,581 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/13d4ec428a36456095985fc660f76590 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/13d4ec428a36456095985fc660f76590 2024-11-20T17:21:46,586 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 51c47a20e94658a652843ed744178633/A of 51c47a20e94658a652843ed744178633 into 13d4ec428a36456095985fc660f76590(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:21:46,586 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 51c47a20e94658a652843ed744178633: 2024-11-20T17:21:46,587 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633., storeName=51c47a20e94658a652843ed744178633/A, priority=13, startTime=1732123305733; duration=0sec 2024-11-20T17:21:46,587 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:46,587 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51c47a20e94658a652843ed744178633:A 2024-11-20T17:21:46,687 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:46,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59636 deadline: 1732123366685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:46,688 DEBUG [Thread-702 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8193 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633., hostname=d514dc944523,40121,1732123262111, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) 
at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:21:46,695 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:46,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59596 deadline: 1732123366693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:46,695 DEBUG [Thread-706 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8198 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633., hostname=d514dc944523,40121,1732123262111, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:21:46,702 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:46,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59586 deadline: 1732123366701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:46,703 DEBUG [Thread-704 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8207 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633., hostname=d514dc944523,40121,1732123262111, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:21:46,827 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:46,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123366826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:46,830 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:46,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123366827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:46,860 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=167, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/a953148e34b244ce902866430e02c6d1 2024-11-20T17:21:46,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/e389e372125c4a08849194da8337f4b0 is 50, key is test_row_0/B:col10/1732123304062/Put/seqid=0 2024-11-20T17:21:46,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742015_1191 (size=12151) 2024-11-20T17:21:46,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T17:21:47,154 INFO [master/d514dc944523:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-20T17:21:47,154 INFO [master/d514dc944523:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-20T17:21:47,280 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/e389e372125c4a08849194da8337f4b0 2024-11-20T17:21:47,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/bda04e23c3304651a91a8fa1a397e32f is 50, key is test_row_0/C:col10/1732123304062/Put/seqid=0 2024-11-20T17:21:47,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742016_1192 (size=12151) 2024-11-20T17:21:47,329 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:47,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123367328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:47,335 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:47,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123367334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:47,695 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/bda04e23c3304651a91a8fa1a397e32f 2024-11-20T17:21:47,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/a953148e34b244ce902866430e02c6d1 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/a953148e34b244ce902866430e02c6d1 2024-11-20T17:21:47,705 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/a953148e34b244ce902866430e02c6d1, entries=150, sequenceid=167, filesize=30.4 K 2024-11-20T17:21:47,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/e389e372125c4a08849194da8337f4b0 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/e389e372125c4a08849194da8337f4b0 2024-11-20T17:21:47,710 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/e389e372125c4a08849194da8337f4b0, entries=150, sequenceid=167, filesize=11.9 K 2024-11-20T17:21:47,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/bda04e23c3304651a91a8fa1a397e32f as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/bda04e23c3304651a91a8fa1a397e32f 2024-11-20T17:21:47,714 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/bda04e23c3304651a91a8fa1a397e32f, entries=150, sequenceid=167, filesize=11.9 K 2024-11-20T17:21:47,715 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 51c47a20e94658a652843ed744178633 in 1700ms, sequenceid=167, compaction requested=false 2024-11-20T17:21:47,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for 51c47a20e94658a652843ed744178633: 2024-11-20T17:21:47,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 
2024-11-20T17:21:47,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56 2024-11-20T17:21:47,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=56 2024-11-20T17:21:47,718 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-11-20T17:21:47,718 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8540 sec 2024-11-20T17:21:47,719 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 1.8580 sec 2024-11-20T17:21:47,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T17:21:47,966 INFO [Thread-710 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-11-20T17:21:47,967 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:21:47,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees 2024-11-20T17:21:47,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T17:21:47,969 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:21:47,969 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:21:47,970 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:21:48,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T17:21:48,122 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:48,123 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T17:21:48,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 
2024-11-20T17:21:48,123 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2837): Flushing 51c47a20e94658a652843ed744178633 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T17:21:48,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=A 2024-11-20T17:21:48,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:48,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=B 2024-11-20T17:21:48,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:48,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=C 2024-11-20T17:21:48,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:48,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e9adbee55d2c44e9aa20dcb245225420_51c47a20e94658a652843ed744178633 is 50, key is test_row_0/A:col10/1732123306200/Put/seqid=0 2024-11-20T17:21:48,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742017_1193 (size=12304) 2024-11-20T17:21:48,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T17:21:48,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 51c47a20e94658a652843ed744178633 2024-11-20T17:21:48,337 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. as already flushing 2024-11-20T17:21:48,369 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:48,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123368365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:48,369 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:48,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123368366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:48,470 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:48,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123368470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:48,471 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:48,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123368471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:48,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:48,541 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e9adbee55d2c44e9aa20dcb245225420_51c47a20e94658a652843ed744178633 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e9adbee55d2c44e9aa20dcb245225420_51c47a20e94658a652843ed744178633 2024-11-20T17:21:48,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/a461dedaa1754bbcb2db013013784349, store: [table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:48,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/a461dedaa1754bbcb2db013013784349 is 175, key is test_row_0/A:col10/1732123306200/Put/seqid=0 2024-11-20T17:21:48,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742018_1194 (size=31105) 2024-11-20T17:21:48,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T17:21:48,673 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:48,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123368673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:48,673 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:48,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123368673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:48,952 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=183, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/a461dedaa1754bbcb2db013013784349 2024-11-20T17:21:48,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/4861ce088557453cbcec1ab52b9062f7 is 50, key is test_row_0/B:col10/1732123306200/Put/seqid=0 2024-11-20T17:21:48,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742019_1195 (size=12151) 2024-11-20T17:21:48,972 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=183 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/4861ce088557453cbcec1ab52b9062f7 2024-11-20T17:21:48,975 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:48,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123368974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:48,978 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:48,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123368976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:48,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/ec587342e4a24c17840140fbb93bef81 is 50, key is test_row_0/C:col10/1732123306200/Put/seqid=0 2024-11-20T17:21:48,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742020_1196 (size=12151) 2024-11-20T17:21:49,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T17:21:49,391 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=183 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/ec587342e4a24c17840140fbb93bef81 2024-11-20T17:21:49,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/a461dedaa1754bbcb2db013013784349 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/a461dedaa1754bbcb2db013013784349 2024-11-20T17:21:49,401 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/a461dedaa1754bbcb2db013013784349, entries=150, sequenceid=183, filesize=30.4 K 2024-11-20T17:21:49,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/4861ce088557453cbcec1ab52b9062f7 as 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/4861ce088557453cbcec1ab52b9062f7 2024-11-20T17:21:49,413 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/4861ce088557453cbcec1ab52b9062f7, entries=150, sequenceid=183, filesize=11.9 K 2024-11-20T17:21:49,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/ec587342e4a24c17840140fbb93bef81 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/ec587342e4a24c17840140fbb93bef81 2024-11-20T17:21:49,419 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/ec587342e4a24c17840140fbb93bef81, entries=150, sequenceid=183, filesize=11.9 K 2024-11-20T17:21:49,420 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 51c47a20e94658a652843ed744178633 in 1297ms, sequenceid=183, compaction requested=true 2024-11-20T17:21:49,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for 51c47a20e94658a652843ed744178633: 2024-11-20T17:21:49,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 
2024-11-20T17:21:49,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58 2024-11-20T17:21:49,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=58 2024-11-20T17:21:49,425 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-11-20T17:21:49,425 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4530 sec 2024-11-20T17:21:49,427 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 1.4590 sec 2024-11-20T17:21:49,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 51c47a20e94658a652843ed744178633 2024-11-20T17:21:49,478 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 51c47a20e94658a652843ed744178633 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T17:21:49,478 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=A 2024-11-20T17:21:49,479 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:49,479 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=B 2024-11-20T17:21:49,479 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:49,479 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=C 2024-11-20T17:21:49,479 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:49,486 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120809f3ca86eb24958a872dbee03cfe64e_51c47a20e94658a652843ed744178633 is 50, key is test_row_0/A:col10/1732123308365/Put/seqid=0 2024-11-20T17:21:49,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742021_1197 (size=14794) 2024-11-20T17:21:49,492 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:49,497 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120809f3ca86eb24958a872dbee03cfe64e_51c47a20e94658a652843ed744178633 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120809f3ca86eb24958a872dbee03cfe64e_51c47a20e94658a652843ed744178633 2024-11-20T17:21:49,498 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/e7fe240277d742babda9e27bb0bb5dd6, store: [table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:49,497 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:49,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123369495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:49,499 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/e7fe240277d742babda9e27bb0bb5dd6 is 175, key is test_row_0/A:col10/1732123308365/Put/seqid=0 2024-11-20T17:21:49,500 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:49,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123369499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:49,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742022_1198 (size=39749) 2024-11-20T17:21:49,600 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:49,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123369599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:49,603 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:49,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123369602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:49,804 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:49,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123369802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:49,805 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:49,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123369804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:49,903 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=206, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/e7fe240277d742babda9e27bb0bb5dd6 2024-11-20T17:21:49,920 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/1601363886fd4c72b0cca3edb007b9ca is 50, key is test_row_0/B:col10/1732123308365/Put/seqid=0 2024-11-20T17:21:49,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742023_1199 (size=12151) 2024-11-20T17:21:50,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T17:21:50,072 INFO [Thread-710 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed 2024-11-20T17:21:50,074 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:21:50,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees 2024-11-20T17:21:50,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 
2024-11-20T17:21:50,075 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:21:50,076 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:21:50,076 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:21:50,109 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:50,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123370106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:50,109 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:50,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123370107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:50,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-20T17:21:50,228 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:50,228 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-20T17:21:50,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:50,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. as already flushing 2024-11-20T17:21:50,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:50,229 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:21:50,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:50,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:50,326 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/1601363886fd4c72b0cca3edb007b9ca 2024-11-20T17:21:50,334 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/32612e3fa42b4a17b89d90f6de0fc8b0 is 50, key is test_row_0/C:col10/1732123308365/Put/seqid=0 2024-11-20T17:21:50,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742024_1200 (size=12151) 2024-11-20T17:21:50,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-20T17:21:50,381 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:50,381 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-20T17:21:50,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:50,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. as already flushing 2024-11-20T17:21:50,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:50,382 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:50,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:50,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:50,534 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:50,535 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-20T17:21:50,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:50,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. as already flushing 2024-11-20T17:21:50,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:50,535 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:50,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:50,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:50,612 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:50,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123370612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:50,613 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:50,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123370613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:50,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-20T17:21:50,687 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:50,687 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-20T17:21:50,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:50,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. as already flushing 2024-11-20T17:21:50,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:50,688 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:21:50,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:50,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:50,738 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/32612e3fa42b4a17b89d90f6de0fc8b0 2024-11-20T17:21:50,744 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/e7fe240277d742babda9e27bb0bb5dd6 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/e7fe240277d742babda9e27bb0bb5dd6 2024-11-20T17:21:50,749 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/e7fe240277d742babda9e27bb0bb5dd6, entries=200, sequenceid=206, filesize=38.8 K 2024-11-20T17:21:50,750 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/1601363886fd4c72b0cca3edb007b9ca as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/1601363886fd4c72b0cca3edb007b9ca 2024-11-20T17:21:50,754 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/1601363886fd4c72b0cca3edb007b9ca, entries=150, sequenceid=206, filesize=11.9 K 2024-11-20T17:21:50,755 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/32612e3fa42b4a17b89d90f6de0fc8b0 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/32612e3fa42b4a17b89d90f6de0fc8b0 2024-11-20T17:21:50,759 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/32612e3fa42b4a17b89d90f6de0fc8b0, entries=150, sequenceid=206, filesize=11.9 K 2024-11-20T17:21:50,760 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 51c47a20e94658a652843ed744178633 in 1282ms, sequenceid=206, compaction requested=true 2024-11-20T17:21:50,760 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 51c47a20e94658a652843ed744178633: 2024-11-20T17:21:50,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
51c47a20e94658a652843ed744178633:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:21:50,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:50,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51c47a20e94658a652843ed744178633:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:21:50,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:50,760 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:21:50,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51c47a20e94658a652843ed744178633:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:21:50,760 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:21:50,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:21:50,762 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:21:50,762 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 133406 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:21:50,762 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 51c47a20e94658a652843ed744178633/B is initiating minor compaction (all files) 2024-11-20T17:21:50,762 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): 51c47a20e94658a652843ed744178633/A is initiating minor compaction (all files) 2024-11-20T17:21:50,762 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 51c47a20e94658a652843ed744178633/B in TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:50,762 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 51c47a20e94658a652843ed744178633/A in TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 
2024-11-20T17:21:50,762 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/13d4ec428a36456095985fc660f76590, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/a953148e34b244ce902866430e02c6d1, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/a461dedaa1754bbcb2db013013784349, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/e7fe240277d742babda9e27bb0bb5dd6] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp, totalSize=130.3 K 2024-11-20T17:21:50,762 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/e96e7fb470034b85ae39c2562dc4df1d, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/e389e372125c4a08849194da8337f4b0, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/4861ce088557453cbcec1ab52b9062f7, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/1601363886fd4c72b0cca3edb007b9ca] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp, totalSize=47.8 K 2024-11-20T17:21:50,762 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:50,762 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 
files: [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/13d4ec428a36456095985fc660f76590, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/a953148e34b244ce902866430e02c6d1, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/a461dedaa1754bbcb2db013013784349, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/e7fe240277d742babda9e27bb0bb5dd6] 2024-11-20T17:21:50,762 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting e96e7fb470034b85ae39c2562dc4df1d, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=144, earliestPutTs=1732123303416 2024-11-20T17:21:50,762 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 13d4ec428a36456095985fc660f76590, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=144, earliestPutTs=1732123303416 2024-11-20T17:21:50,763 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting e389e372125c4a08849194da8337f4b0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1732123304055 2024-11-20T17:21:50,763 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting a953148e34b244ce902866430e02c6d1, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1732123304055 2024-11-20T17:21:50,763 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 4861ce088557453cbcec1ab52b9062f7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=183, earliestPutTs=1732123306192 2024-11-20T17:21:50,763 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting a461dedaa1754bbcb2db013013784349, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=183, earliestPutTs=1732123306192 2024-11-20T17:21:50,764 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 1601363886fd4c72b0cca3edb007b9ca, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1732123308362 2024-11-20T17:21:50,764 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting e7fe240277d742babda9e27bb0bb5dd6, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1732123308362 2024-11-20T17:21:50,774 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51c47a20e94658a652843ed744178633#B#compaction#169 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:50,775 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/63e5ef2f9be74ff1b935fb226a4f52d7 is 50, key is test_row_0/B:col10/1732123308365/Put/seqid=0 2024-11-20T17:21:50,775 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:50,778 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120fd9e13408a914844be6d2aff2d6a57c3_51c47a20e94658a652843ed744178633 store=[table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:50,780 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120fd9e13408a914844be6d2aff2d6a57c3_51c47a20e94658a652843ed744178633, store=[table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:50,781 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120fd9e13408a914844be6d2aff2d6a57c3_51c47a20e94658a652843ed744178633 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:50,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742025_1201 (size=12629) 2024-11-20T17:21:50,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742026_1202 (size=4469) 2024-11-20T17:21:50,787 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51c47a20e94658a652843ed744178633#A#compaction#170 average throughput is 2.04 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:50,788 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/7a727d9e5d83461bba69e6df3022a920 is 175, key is test_row_0/A:col10/1732123308365/Put/seqid=0 2024-11-20T17:21:50,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742027_1203 (size=31583) 2024-11-20T17:21:50,840 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:50,841 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-20T17:21:50,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:50,841 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2837): Flushing 51c47a20e94658a652843ed744178633 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T17:21:50,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=A 2024-11-20T17:21:50,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:50,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=B 2024-11-20T17:21:50,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:50,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=C 2024-11-20T17:21:50,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:50,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112050d7edc126aa4d1d9e9c0991690df53c_51c47a20e94658a652843ed744178633 is 50, key is test_row_0/A:col10/1732123309493/Put/seqid=0 2024-11-20T17:21:50,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742028_1204 (size=12304) 2024-11-20T17:21:51,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is 
done pid=59 2024-11-20T17:21:51,189 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/63e5ef2f9be74ff1b935fb226a4f52d7 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/63e5ef2f9be74ff1b935fb226a4f52d7 2024-11-20T17:21:51,195 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 51c47a20e94658a652843ed744178633/B of 51c47a20e94658a652843ed744178633 into 63e5ef2f9be74ff1b935fb226a4f52d7(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:21:51,195 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 51c47a20e94658a652843ed744178633: 2024-11-20T17:21:51,195 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633., storeName=51c47a20e94658a652843ed744178633/B, priority=12, startTime=1732123310760; duration=0sec 2024-11-20T17:21:51,195 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:21:51,195 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51c47a20e94658a652843ed744178633:B 2024-11-20T17:21:51,195 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:21:51,197 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:21:51,197 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 51c47a20e94658a652843ed744178633/C is initiating minor compaction (all files) 2024-11-20T17:21:51,197 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 51c47a20e94658a652843ed744178633/C in TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 
2024-11-20T17:21:51,197 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/76a612499cee44ddb8d41bb0a36b7b81, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/bda04e23c3304651a91a8fa1a397e32f, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/ec587342e4a24c17840140fbb93bef81, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/32612e3fa42b4a17b89d90f6de0fc8b0] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp, totalSize=47.8 K 2024-11-20T17:21:51,198 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 76a612499cee44ddb8d41bb0a36b7b81, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=144, earliestPutTs=1732123303416 2024-11-20T17:21:51,198 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting bda04e23c3304651a91a8fa1a397e32f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1732123304055 2024-11-20T17:21:51,199 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/7a727d9e5d83461bba69e6df3022a920 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/7a727d9e5d83461bba69e6df3022a920 2024-11-20T17:21:51,199 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting ec587342e4a24c17840140fbb93bef81, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=183, earliestPutTs=1732123306192 2024-11-20T17:21:51,200 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 32612e3fa42b4a17b89d90f6de0fc8b0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1732123308362 2024-11-20T17:21:51,208 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 51c47a20e94658a652843ed744178633/A of 51c47a20e94658a652843ed744178633 into 7a727d9e5d83461bba69e6df3022a920(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:21:51,208 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 51c47a20e94658a652843ed744178633: 2024-11-20T17:21:51,208 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633., storeName=51c47a20e94658a652843ed744178633/A, priority=12, startTime=1732123310760; duration=0sec 2024-11-20T17:21:51,208 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:51,208 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51c47a20e94658a652843ed744178633:A 2024-11-20T17:21:51,211 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51c47a20e94658a652843ed744178633#C#compaction#172 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:51,211 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/4ae79dccfb504a4bb40c0f187a5e8518 is 50, key is test_row_0/C:col10/1732123308365/Put/seqid=0 2024-11-20T17:21:51,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742029_1205 (size=12629) 2024-11-20T17:21:51,222 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/4ae79dccfb504a4bb40c0f187a5e8518 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/4ae79dccfb504a4bb40c0f187a5e8518 2024-11-20T17:21:51,228 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 51c47a20e94658a652843ed744178633/C of 51c47a20e94658a652843ed744178633 into 4ae79dccfb504a4bb40c0f187a5e8518(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:21:51,228 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 51c47a20e94658a652843ed744178633: 2024-11-20T17:21:51,228 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633., storeName=51c47a20e94658a652843ed744178633/C, priority=12, startTime=1732123310760; duration=0sec 2024-11-20T17:21:51,228 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:51,228 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51c47a20e94658a652843ed744178633:C 2024-11-20T17:21:51,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:51,264 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112050d7edc126aa4d1d9e9c0991690df53c_51c47a20e94658a652843ed744178633 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112050d7edc126aa4d1d9e9c0991690df53c_51c47a20e94658a652843ed744178633 2024-11-20T17:21:51,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/2796ab3ed15344fe8bb40507864be574, store: [table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:51,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/2796ab3ed15344fe8bb40507864be574 is 175, key is test_row_0/A:col10/1732123309493/Put/seqid=0 2024-11-20T17:21:51,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742030_1206 (size=31105) 2024-11-20T17:21:51,273 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=219, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/2796ab3ed15344fe8bb40507864be574 2024-11-20T17:21:51,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/85132f4c963047a2ba472166ac49d15d is 50, key is test_row_0/B:col10/1732123309493/Put/seqid=0 2024-11-20T17:21:51,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742031_1207 (size=12151) 2024-11-20T17:21:51,619 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. as already flushing 2024-11-20T17:21:51,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 51c47a20e94658a652843ed744178633 2024-11-20T17:21:51,644 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:51,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123371643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:51,645 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:51,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123371644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:51,690 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/85132f4c963047a2ba472166ac49d15d 2024-11-20T17:21:51,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/cd507fd1704747d0a34b0b2309ba6954 is 50, key is test_row_0/C:col10/1732123309493/Put/seqid=0 2024-11-20T17:21:51,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742032_1208 (size=12151) 2024-11-20T17:21:51,731 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/cd507fd1704747d0a34b0b2309ba6954 2024-11-20T17:21:51,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/2796ab3ed15344fe8bb40507864be574 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/2796ab3ed15344fe8bb40507864be574 2024-11-20T17:21:51,742 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/2796ab3ed15344fe8bb40507864be574, entries=150, sequenceid=219, filesize=30.4 K 2024-11-20T17:21:51,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 
{event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/85132f4c963047a2ba472166ac49d15d as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/85132f4c963047a2ba472166ac49d15d 2024-11-20T17:21:51,747 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:51,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123371746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:51,747 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:51,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123371746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:51,748 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/85132f4c963047a2ba472166ac49d15d, entries=150, sequenceid=219, filesize=11.9 K 2024-11-20T17:21:51,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/cd507fd1704747d0a34b0b2309ba6954 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/cd507fd1704747d0a34b0b2309ba6954 2024-11-20T17:21:51,755 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/cd507fd1704747d0a34b0b2309ba6954, entries=150, sequenceid=219, filesize=11.9 K 2024-11-20T17:21:51,759 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 51c47a20e94658a652843ed744178633 in 918ms, sequenceid=219, compaction requested=false 2024-11-20T17:21:51,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2538): Flush status journal for 51c47a20e94658a652843ed744178633: 2024-11-20T17:21:51,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 
2024-11-20T17:21:51,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=60 2024-11-20T17:21:51,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=60 2024-11-20T17:21:51,762 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-11-20T17:21:51,762 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6840 sec 2024-11-20T17:21:51,764 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees in 1.6880 sec 2024-11-20T17:21:51,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 51c47a20e94658a652843ed744178633 2024-11-20T17:21:51,951 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 51c47a20e94658a652843ed744178633 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T17:21:51,951 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=A 2024-11-20T17:21:51,951 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:51,951 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=B 2024-11-20T17:21:51,951 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:51,951 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=C 2024-11-20T17:21:51,951 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:51,960 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112089ad37147b7443b5bd09018deb616391_51c47a20e94658a652843ed744178633 is 50, key is test_row_0/A:col10/1732123311949/Put/seqid=0 2024-11-20T17:21:51,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742033_1209 (size=17284) 2024-11-20T17:21:51,966 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:51,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123371965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:51,967 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:51,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123371965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:52,069 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:52,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123372068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:52,069 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:52,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123372068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:52,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-20T17:21:52,179 INFO [Thread-710 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-11-20T17:21:52,181 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:21:52,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees 2024-11-20T17:21:52,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T17:21:52,182 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:21:52,183 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:21:52,183 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:21:52,273 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:52,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123372271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:52,274 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:52,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123372271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:52,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T17:21:52,334 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:52,335 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T17:21:52,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:52,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. as already flushing 2024-11-20T17:21:52,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:52,336 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:21:52,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:52,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:52,365 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:52,369 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112089ad37147b7443b5bd09018deb616391_51c47a20e94658a652843ed744178633 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112089ad37147b7443b5bd09018deb616391_51c47a20e94658a652843ed744178633 2024-11-20T17:21:52,371 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/584f06f216524e86930cc875586fc3d3, store: [table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:52,371 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/584f06f216524e86930cc875586fc3d3 is 175, key is test_row_0/A:col10/1732123311949/Put/seqid=0 2024-11-20T17:21:52,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742034_1210 (size=48389) 2024-11-20T17:21:52,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T17:21:52,488 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:52,488 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T17:21:52,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:52,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. as already flushing 2024-11-20T17:21:52,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 
2024-11-20T17:21:52,489 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:52,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:52,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:52,577 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:52,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123372575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:52,577 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:52,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123372577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:52,641 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:52,641 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T17:21:52,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 
2024-11-20T17:21:52,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. as already flushing 2024-11-20T17:21:52,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:52,642 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:52,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:21:52,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:21:52,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T17:21:52,786 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=247, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/584f06f216524e86930cc875586fc3d3 2024-11-20T17:21:52,794 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:52,795 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T17:21:52,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:52,795 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/c0bcd032c7e646b799eb8bdc7dc0c7b4 is 50, key is test_row_0/B:col10/1732123311949/Put/seqid=0 2024-11-20T17:21:52,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. as already flushing 2024-11-20T17:21:52,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:52,795 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:21:52,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:52,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:52,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742035_1211 (size=12151) 2024-11-20T17:21:52,804 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/c0bcd032c7e646b799eb8bdc7dc0c7b4 2024-11-20T17:21:52,812 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/d444ffd2acb048b8941c7a5d91754213 is 50, key is test_row_0/C:col10/1732123311949/Put/seqid=0 2024-11-20T17:21:52,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742036_1212 (size=12151) 2024-11-20T17:21:52,947 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:52,947 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T17:21:52,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:52,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. as already flushing 2024-11-20T17:21:52,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:52,948 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:52,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:52,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:53,080 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:53,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123373078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:53,083 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:53,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123373081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:53,099 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:53,099 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T17:21:53,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:53,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. as already flushing 2024-11-20T17:21:53,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:53,100 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
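The RegionTooBusyException warnings above ("Over memstore limit=512.0 K") are the server rejecting mutations while the region's memstore is over its blocking limit. A hedged sketch of a writer that backs off and retries in that situation follows; the table, family, and row names mirror the test, while the retry count and delays are arbitrary, and whether the exception surfaces directly or wrapped depends on client retry settings:

```java
// Hedged sketch: back off and retry a Put when the region reports it is too busy.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long delayMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put); // the HBase client also retries internally
          break;
        } catch (IOException e) {
          // Depending on retry settings, the RegionTooBusyException may arrive
          // directly or wrapped (e.g. inside a retries-exhausted exception).
          if (attempt >= 5) throw e;     // give up after a few attempts
          Thread.sleep(delayMs);         // simple exponential backoff
          delayMs *= 2;
        }
      }
    }
  }
}
```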
2024-11-20T17:21:53,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:53,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:21:53,221 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/d444ffd2acb048b8941c7a5d91754213 2024-11-20T17:21:53,229 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/584f06f216524e86930cc875586fc3d3 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/584f06f216524e86930cc875586fc3d3 2024-11-20T17:21:53,233 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/584f06f216524e86930cc875586fc3d3, entries=250, sequenceid=247, filesize=47.3 K 2024-11-20T17:21:53,234 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/c0bcd032c7e646b799eb8bdc7dc0c7b4 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/c0bcd032c7e646b799eb8bdc7dc0c7b4 2024-11-20T17:21:53,239 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/c0bcd032c7e646b799eb8bdc7dc0c7b4, entries=150, sequenceid=247, filesize=11.9 K 2024-11-20T17:21:53,240 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/d444ffd2acb048b8941c7a5d91754213 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/d444ffd2acb048b8941c7a5d91754213 2024-11-20T17:21:53,244 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/d444ffd2acb048b8941c7a5d91754213, entries=150, sequenceid=247, filesize=11.9 K 2024-11-20T17:21:53,245 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 51c47a20e94658a652843ed744178633 in 1294ms, sequenceid=247, compaction requested=true 2024-11-20T17:21:53,245 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 51c47a20e94658a652843ed744178633: 2024-11-20T17:21:53,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
51c47a20e94658a652843ed744178633:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:21:53,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:53,245 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:21:53,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51c47a20e94658a652843ed744178633:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:21:53,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:53,245 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:21:53,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51c47a20e94658a652843ed744178633:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:21:53,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:21:53,247 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 111077 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:21:53,247 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): 51c47a20e94658a652843ed744178633/A is initiating minor compaction (all files) 2024-11-20T17:21:53,247 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 51c47a20e94658a652843ed744178633/A in TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:53,247 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/7a727d9e5d83461bba69e6df3022a920, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/2796ab3ed15344fe8bb40507864be574, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/584f06f216524e86930cc875586fc3d3] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp, totalSize=108.5 K 2024-11-20T17:21:53,247 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 
2024-11-20T17:21:53,247 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. files: [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/7a727d9e5d83461bba69e6df3022a920, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/2796ab3ed15344fe8bb40507864be574, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/584f06f216524e86930cc875586fc3d3] 2024-11-20T17:21:53,247 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:21:53,248 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 51c47a20e94658a652843ed744178633/B is initiating minor compaction (all files) 2024-11-20T17:21:53,248 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 51c47a20e94658a652843ed744178633/B in TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:53,248 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/63e5ef2f9be74ff1b935fb226a4f52d7, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/85132f4c963047a2ba472166ac49d15d, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/c0bcd032c7e646b799eb8bdc7dc0c7b4] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp, totalSize=36.1 K 2024-11-20T17:21:53,248 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7a727d9e5d83461bba69e6df3022a920, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1732123308362 2024-11-20T17:21:53,248 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 63e5ef2f9be74ff1b935fb226a4f52d7, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1732123308362 2024-11-20T17:21:53,248 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2796ab3ed15344fe8bb40507864be574, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1732123309493 2024-11-20T17:21:53,249 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 85132f4c963047a2ba472166ac49d15d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1732123309493 2024-11-20T17:21:53,249 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 584f06f216524e86930cc875586fc3d3, 
keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732123311639 2024-11-20T17:21:53,249 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting c0bcd032c7e646b799eb8bdc7dc0c7b4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732123311641 2024-11-20T17:21:53,252 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:53,253 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T17:21:53,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:53,255 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2837): Flushing 51c47a20e94658a652843ed744178633 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T17:21:53,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=A 2024-11-20T17:21:53,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:53,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=B 2024-11-20T17:21:53,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:53,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=C 2024-11-20T17:21:53,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:53,257 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:53,258 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51c47a20e94658a652843ed744178633#B#compaction#178 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:53,258 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/751ccbdffb9a421f85745a71e9193e58 is 50, key is test_row_0/B:col10/1732123311949/Put/seqid=0 2024-11-20T17:21:53,261 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120d67f82722c7746d986aba1e3ccc9dcd8_51c47a20e94658a652843ed744178633 store=[table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:53,263 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120d67f82722c7746d986aba1e3ccc9dcd8_51c47a20e94658a652843ed744178633, store=[table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:53,264 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d67f82722c7746d986aba1e3ccc9dcd8_51c47a20e94658a652843ed744178633 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:53,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411200f5d9744c87b4addb1e5dff6da8f1067_51c47a20e94658a652843ed744178633 is 50, key is test_row_0/A:col10/1732123311961/Put/seqid=0 2024-11-20T17:21:53,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T17:21:53,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742037_1213 (size=12731) 2024-11-20T17:21:53,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742038_1214 (size=4469) 2024-11-20T17:21:53,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742039_1215 (size=12354) 2024-11-20T17:21:53,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:53,313 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411200f5d9744c87b4addb1e5dff6da8f1067_51c47a20e94658a652843ed744178633 to 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200f5d9744c87b4addb1e5dff6da8f1067_51c47a20e94658a652843ed744178633 2024-11-20T17:21:53,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/255b958ce37d438b934d5a5084fa0d8e, store: [table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:53,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/255b958ce37d438b934d5a5084fa0d8e is 175, key is test_row_0/A:col10/1732123311961/Put/seqid=0 2024-11-20T17:21:53,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742040_1216 (size=31155) 2024-11-20T17:21:53,692 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51c47a20e94658a652843ed744178633#A#compaction#179 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:53,693 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/5b6b22dba90f4cdfae3e3bbfb84df483 is 175, key is test_row_0/A:col10/1732123311949/Put/seqid=0 2024-11-20T17:21:53,697 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/751ccbdffb9a421f85745a71e9193e58 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/751ccbdffb9a421f85745a71e9193e58 2024-11-20T17:21:53,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742041_1217 (size=31685) 2024-11-20T17:21:53,703 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 51c47a20e94658a652843ed744178633/B of 51c47a20e94658a652843ed744178633 into 751ccbdffb9a421f85745a71e9193e58(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
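The DefaultMobStoreFlusher and DefaultMobStoreCompactor entries above come from a MOB-enabled column family (family A), whose oversized cells are written under the mobdir path seen in the rename. As a rough illustration only, a MOB family like that could be declared as follows; the 100 KB threshold is an arbitrary example, not taken from the test:

```java
// Hedged sketch: declaring a MOB-enabled column family (threshold is illustrative).
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilySketch {
  public static void main(String[] args) throws Exception {
    ColumnFamilyDescriptor mobFamily = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("A"))
        .setMobEnabled(true)          // cells above the threshold are stored
        .setMobThreshold(100 * 1024)  // as MOB files under the mob directory
        .build();
    TableDescriptor table = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setColumnFamily(mobFamily)
        .build();
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.createTable(table);
    }
  }
}
```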
2024-11-20T17:21:53,703 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 51c47a20e94658a652843ed744178633: 2024-11-20T17:21:53,703 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633., storeName=51c47a20e94658a652843ed744178633/B, priority=13, startTime=1732123313245; duration=0sec 2024-11-20T17:21:53,703 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:21:53,703 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51c47a20e94658a652843ed744178633:B 2024-11-20T17:21:53,704 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:21:53,704 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/5b6b22dba90f4cdfae3e3bbfb84df483 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/5b6b22dba90f4cdfae3e3bbfb84df483 2024-11-20T17:21:53,705 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:21:53,705 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 51c47a20e94658a652843ed744178633/C is initiating minor compaction (all files) 2024-11-20T17:21:53,705 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 51c47a20e94658a652843ed744178633/C in TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 
2024-11-20T17:21:53,705 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/4ae79dccfb504a4bb40c0f187a5e8518, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/cd507fd1704747d0a34b0b2309ba6954, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/d444ffd2acb048b8941c7a5d91754213] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp, totalSize=36.1 K 2024-11-20T17:21:53,706 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 4ae79dccfb504a4bb40c0f187a5e8518, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1732123308362 2024-11-20T17:21:53,707 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting cd507fd1704747d0a34b0b2309ba6954, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1732123309493 2024-11-20T17:21:53,708 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting d444ffd2acb048b8941c7a5d91754213, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732123311641 2024-11-20T17:21:53,710 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 51c47a20e94658a652843ed744178633/A of 51c47a20e94658a652843ed744178633 into 5b6b22dba90f4cdfae3e3bbfb84df483(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:21:53,710 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 51c47a20e94658a652843ed744178633: 2024-11-20T17:21:53,710 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633., storeName=51c47a20e94658a652843ed744178633/A, priority=13, startTime=1732123313245; duration=0sec 2024-11-20T17:21:53,711 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:53,711 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51c47a20e94658a652843ed744178633:A 2024-11-20T17:21:53,717 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51c47a20e94658a652843ed744178633#C#compaction#181 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:53,717 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/d34bd3f6a4054d6db336f541a1395886 is 50, key is test_row_0/C:col10/1732123311949/Put/seqid=0 2024-11-20T17:21:53,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742042_1218 (size=12731) 2024-11-20T17:21:53,731 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=259, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/255b958ce37d438b934d5a5084fa0d8e 2024-11-20T17:21:53,748 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/d34bd3f6a4054d6db336f541a1395886 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/d34bd3f6a4054d6db336f541a1395886 2024-11-20T17:21:53,754 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 51c47a20e94658a652843ed744178633/C of 51c47a20e94658a652843ed744178633 into d34bd3f6a4054d6db336f541a1395886(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
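The behaviour in this stretch of the log, writes blocked once the memstore exceeds its limit and minor compactions selected once three store files are eligible (with 16 files blocking new flushes), is governed by a handful of standard HBase settings. A sketch of the relevant keys is below; the values are illustrative, not the miniaturized configuration this test actually runs with:

```java
// Hedged sketch of the standard configuration keys behind the flush/compaction
// behaviour above. Values are illustrative only.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class FlushCompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region memstore flush size, and the multiplier beyond which further
    // writes are rejected with RegionTooBusyException (the "Over memstore
    // limit" warnings in this log).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    // Minimum/maximum number of store files considered for a minor compaction
    // (the "3 eligible" selections above).
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Flushes are blocked once a store accumulates this many files
    // (the "16 blocking" figure in the selection log lines).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    System.out.println("memstore blocking limit = "
        + conf.getLong("hbase.hregion.memstore.flush.size", 0)
          * conf.getInt("hbase.hregion.memstore.block.multiplier", 0) + " bytes");
  }
}
```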
2024-11-20T17:21:53,754 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 51c47a20e94658a652843ed744178633: 2024-11-20T17:21:53,754 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633., storeName=51c47a20e94658a652843ed744178633/C, priority=13, startTime=1732123313245; duration=0sec 2024-11-20T17:21:53,755 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:53,755 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51c47a20e94658a652843ed744178633:C 2024-11-20T17:21:53,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/50ad06a0b32c404fb024e1ea283ee8f7 is 50, key is test_row_0/B:col10/1732123311961/Put/seqid=0 2024-11-20T17:21:53,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742043_1219 (size=12201) 2024-11-20T17:21:54,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 51c47a20e94658a652843ed744178633 2024-11-20T17:21:54,083 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. as already flushing 2024-11-20T17:21:54,114 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:54,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123374112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:54,115 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:54,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123374113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:54,160 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=259 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/50ad06a0b32c404fb024e1ea283ee8f7 2024-11-20T17:21:54,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/7fe19cc3c98a417dba619bb60b9d7422 is 50, key is test_row_0/C:col10/1732123311961/Put/seqid=0 2024-11-20T17:21:54,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742044_1220 (size=12201) 2024-11-20T17:21:54,216 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:54,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123374215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:54,217 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:54,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123374216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:54,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T17:21:54,419 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:54,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123374419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:54,419 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:54,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123374419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:54,573 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=259 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/7fe19cc3c98a417dba619bb60b9d7422 2024-11-20T17:21:54,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/255b958ce37d438b934d5a5084fa0d8e as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/255b958ce37d438b934d5a5084fa0d8e 2024-11-20T17:21:54,584 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/255b958ce37d438b934d5a5084fa0d8e, entries=150, sequenceid=259, filesize=30.4 K 2024-11-20T17:21:54,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/50ad06a0b32c404fb024e1ea283ee8f7 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/50ad06a0b32c404fb024e1ea283ee8f7 2024-11-20T17:21:54,589 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/50ad06a0b32c404fb024e1ea283ee8f7, entries=150, sequenceid=259, filesize=11.9 K 2024-11-20T17:21:54,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/7fe19cc3c98a417dba619bb60b9d7422 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/7fe19cc3c98a417dba619bb60b9d7422 2024-11-20T17:21:54,594 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/7fe19cc3c98a417dba619bb60b9d7422, entries=150, sequenceid=259, filesize=11.9 K 2024-11-20T17:21:54,595 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 51c47a20e94658a652843ed744178633 in 1341ms, sequenceid=259, compaction requested=false 2024-11-20T17:21:54,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2538): Flush status journal for 51c47a20e94658a652843ed744178633: 2024-11-20T17:21:54,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:54,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=62 2024-11-20T17:21:54,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=62 2024-11-20T17:21:54,598 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-11-20T17:21:54,598 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4140 sec 2024-11-20T17:21:54,600 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees in 2.4180 sec 2024-11-20T17:21:54,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 51c47a20e94658a652843ed744178633 2024-11-20T17:21:54,724 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 51c47a20e94658a652843ed744178633 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T17:21:54,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=A 2024-11-20T17:21:54,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:54,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=B 2024-11-20T17:21:54,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:54,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
51c47a20e94658a652843ed744178633, store=C 2024-11-20T17:21:54,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:54,732 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120ff5a72b8fceb4a8791f351a6989f16b1_51c47a20e94658a652843ed744178633 is 50, key is test_row_0/A:col10/1732123314723/Put/seqid=0 2024-11-20T17:21:54,737 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:54,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123374736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:54,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742045_1221 (size=12454) 2024-11-20T17:21:54,737 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:54,739 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:54,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123374737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:54,741 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120ff5a72b8fceb4a8791f351a6989f16b1_51c47a20e94658a652843ed744178633 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120ff5a72b8fceb4a8791f351a6989f16b1_51c47a20e94658a652843ed744178633 2024-11-20T17:21:54,743 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/5d86912a7cb94c4a9027ec9f296ae051, store: [table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:54,743 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/5d86912a7cb94c4a9027ec9f296ae051 is 175, key is test_row_0/A:col10/1732123314723/Put/seqid=0 2024-11-20T17:21:54,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742046_1222 (size=31255) 2024-11-20T17:21:54,748 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=288, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/5d86912a7cb94c4a9027ec9f296ae051 2024-11-20T17:21:54,755 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/2ab90df3676f4a79afefc9bb5a149e07 is 50, key is 
test_row_0/B:col10/1732123314723/Put/seqid=0 2024-11-20T17:21:54,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742047_1223 (size=12301) 2024-11-20T17:21:54,760 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/2ab90df3676f4a79afefc9bb5a149e07 2024-11-20T17:21:54,768 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/57873de81c524ebe869b7f2dd1302155 is 50, key is test_row_0/C:col10/1732123314723/Put/seqid=0 2024-11-20T17:21:54,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742048_1224 (size=12301) 2024-11-20T17:21:54,774 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/57873de81c524ebe869b7f2dd1302155 2024-11-20T17:21:54,779 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/5d86912a7cb94c4a9027ec9f296ae051 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/5d86912a7cb94c4a9027ec9f296ae051 2024-11-20T17:21:54,783 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/5d86912a7cb94c4a9027ec9f296ae051, entries=150, sequenceid=288, filesize=30.5 K 2024-11-20T17:21:54,784 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/2ab90df3676f4a79afefc9bb5a149e07 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/2ab90df3676f4a79afefc9bb5a149e07 2024-11-20T17:21:54,790 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/2ab90df3676f4a79afefc9bb5a149e07, entries=150, sequenceid=288, filesize=12.0 K 2024-11-20T17:21:54,791 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/57873de81c524ebe869b7f2dd1302155 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/57873de81c524ebe869b7f2dd1302155 
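[Annotation] The recurring "RegionTooBusyException: Over memstore limit=512.0 K" warnings in this run come from HRegion.checkResources blocking writes once the region's memstore passes its blocking size, which is derived from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The values TestAcidGuarantees actually configures are not visible in this log, so the sketch below uses illustrative numbers only.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Minimal sketch with hypothetical values (not the test's real settings):
// the per-region blocking memstore size that triggers RegionTooBusyException
// is roughly flush size * block multiplier.
public class BlockingMemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L); // hypothetical 128 K flush size
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // 4 is the usual default

    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // With the hypothetical numbers above this prints 524288 bytes, i.e. the
    // 512.0 K limit reported in the warnings.
    System.out.println("Blocking memstore limit per region: " + blockingLimit + " bytes");
  }
}
```

Once a flush (like the ones logged here) brings the memstore back under that limit, subsequent Mutate calls are accepted again.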
2024-11-20T17:21:54,796 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/57873de81c524ebe869b7f2dd1302155, entries=150, sequenceid=288, filesize=12.0 K 2024-11-20T17:21:54,797 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 51c47a20e94658a652843ed744178633 in 72ms, sequenceid=288, compaction requested=true 2024-11-20T17:21:54,797 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 51c47a20e94658a652843ed744178633: 2024-11-20T17:21:54,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51c47a20e94658a652843ed744178633:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:21:54,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:54,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51c47a20e94658a652843ed744178633:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:21:54,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:54,797 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:21:54,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51c47a20e94658a652843ed744178633:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:21:54,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:21:54,797 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:21:54,798 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94095 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:21:54,798 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37233 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:21:54,798 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 51c47a20e94658a652843ed744178633/B is initiating minor compaction (all files) 2024-11-20T17:21:54,798 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): 51c47a20e94658a652843ed744178633/A is initiating minor compaction (all files) 2024-11-20T17:21:54,798 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 51c47a20e94658a652843ed744178633/B in TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 
2024-11-20T17:21:54,799 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 51c47a20e94658a652843ed744178633/A in TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:54,799 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/751ccbdffb9a421f85745a71e9193e58, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/50ad06a0b32c404fb024e1ea283ee8f7, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/2ab90df3676f4a79afefc9bb5a149e07] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp, totalSize=36.4 K 2024-11-20T17:21:54,799 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/5b6b22dba90f4cdfae3e3bbfb84df483, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/255b958ce37d438b934d5a5084fa0d8e, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/5d86912a7cb94c4a9027ec9f296ae051] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp, totalSize=91.9 K 2024-11-20T17:21:54,799 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:54,799 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 
files: [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/5b6b22dba90f4cdfae3e3bbfb84df483, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/255b958ce37d438b934d5a5084fa0d8e, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/5d86912a7cb94c4a9027ec9f296ae051] 2024-11-20T17:21:54,799 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 751ccbdffb9a421f85745a71e9193e58, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732123311641 2024-11-20T17:21:54,799 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5b6b22dba90f4cdfae3e3bbfb84df483, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732123311641 2024-11-20T17:21:54,800 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 50ad06a0b32c404fb024e1ea283ee8f7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1732123311959 2024-11-20T17:21:54,800 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 255b958ce37d438b934d5a5084fa0d8e, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1732123311959 2024-11-20T17:21:54,800 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 2ab90df3676f4a79afefc9bb5a149e07, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732123314109 2024-11-20T17:21:54,800 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5d86912a7cb94c4a9027ec9f296ae051, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732123314109 2024-11-20T17:21:54,814 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51c47a20e94658a652843ed744178633#B#compaction#187 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:54,815 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/a1d93c8017f5425e943d0551bfd99fab is 50, key is test_row_0/B:col10/1732123314723/Put/seqid=0 2024-11-20T17:21:54,816 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:54,821 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112097898f2a89864dd39c13e191af1d56f1_51c47a20e94658a652843ed744178633 store=[table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:54,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742049_1225 (size=12983) 2024-11-20T17:21:54,822 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112097898f2a89864dd39c13e191af1d56f1_51c47a20e94658a652843ed744178633, store=[table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:54,823 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112097898f2a89864dd39c13e191af1d56f1_51c47a20e94658a652843ed744178633 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:54,829 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/a1d93c8017f5425e943d0551bfd99fab as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/a1d93c8017f5425e943d0551bfd99fab 2024-11-20T17:21:54,834 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 51c47a20e94658a652843ed744178633/B of 51c47a20e94658a652843ed744178633 into a1d93c8017f5425e943d0551bfd99fab(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
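[Annotation] The compaction selection logged above (SortedCompactionPolicy reporting "3 eligible, 16 blocking" and ExploringCompactionPolicy choosing all three store files for a minor compaction) is driven by the store's compaction thresholds; the "16 blocking" figure corresponds to hbase.hstore.blockingStoreFiles. A minimal sketch of the relevant knobs follows, using the usual defaults and assuming nothing about what this test overrides.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch of the compaction-selection knobs referenced by the log above.
// The values shown are the common defaults; the test may use different ones.
public class CompactionSelectionKnobs {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);        // need at least 3 files to start a minor compaction
    conf.setInt("hbase.hstore.compaction.max", 10);       // never compact more than 10 files at once
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // size ratio used by the exploring policy
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);   // the "16 blocking" figure in the log

    System.out.println("min=" + conf.getInt("hbase.hstore.compaction.min", 3)
        + " max=" + conf.getInt("hbase.hstore.compaction.max", 10)
        + " ratio=" + conf.getFloat("hbase.hstore.compaction.ratio", 1.2f)
        + " blocking=" + conf.getInt("hbase.hstore.blockingStoreFiles", 16));
  }
}
```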
2024-11-20T17:21:54,834 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 51c47a20e94658a652843ed744178633: 2024-11-20T17:21:54,834 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633., storeName=51c47a20e94658a652843ed744178633/B, priority=13, startTime=1732123314797; duration=0sec 2024-11-20T17:21:54,835 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:21:54,835 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51c47a20e94658a652843ed744178633:B 2024-11-20T17:21:54,835 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:21:54,836 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37233 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:21:54,836 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 51c47a20e94658a652843ed744178633/C is initiating minor compaction (all files) 2024-11-20T17:21:54,836 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 51c47a20e94658a652843ed744178633/C in TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:54,836 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/d34bd3f6a4054d6db336f541a1395886, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/7fe19cc3c98a417dba619bb60b9d7422, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/57873de81c524ebe869b7f2dd1302155] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp, totalSize=36.4 K 2024-11-20T17:21:54,837 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting d34bd3f6a4054d6db336f541a1395886, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732123311641 2024-11-20T17:21:54,837 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 7fe19cc3c98a417dba619bb60b9d7422, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1732123311959 2024-11-20T17:21:54,838 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 57873de81c524ebe869b7f2dd1302155, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732123314109 2024-11-20T17:21:54,840 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 51c47a20e94658a652843ed744178633 3/3 column 
families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T17:21:54,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=A 2024-11-20T17:21:54,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:54,841 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=B 2024-11-20T17:21:54,841 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:54,841 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=C 2024-11-20T17:21:54,841 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:54,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 51c47a20e94658a652843ed744178633 2024-11-20T17:21:54,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742050_1226 (size=4469) 2024-11-20T17:21:54,851 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51c47a20e94658a652843ed744178633#A#compaction#188 average throughput is 0.70 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:54,852 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/a09085f4bfdd4e24a06c458c30777eaa is 175, key is test_row_0/A:col10/1732123314723/Put/seqid=0 2024-11-20T17:21:54,853 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51c47a20e94658a652843ed744178633#C#compaction#189 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:21:54,854 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/ef7579f8a2bf4df1818a31fb9f4638e1 is 50, key is test_row_0/C:col10/1732123314723/Put/seqid=0 2024-11-20T17:21:54,854 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120b0db4ce7e5d94e05bca6d8ae642d9f7a_51c47a20e94658a652843ed744178633 is 50, key is test_row_0/A:col10/1732123314839/Put/seqid=0 2024-11-20T17:21:54,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742051_1227 (size=31937) 2024-11-20T17:21:54,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742052_1228 (size=14994) 2024-11-20T17:21:54,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742053_1229 (size=12983) 2024-11-20T17:21:54,887 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:54,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123374885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:54,889 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:54,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123374887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:54,990 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:54,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123374988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:54,991 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:54,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123374990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:55,192 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:55,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123375191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:55,194 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:55,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123375193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:55,278 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/a09085f4bfdd4e24a06c458c30777eaa as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/a09085f4bfdd4e24a06c458c30777eaa 2024-11-20T17:21:55,279 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:55,284 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120b0db4ce7e5d94e05bca6d8ae642d9f7a_51c47a20e94658a652843ed744178633 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b0db4ce7e5d94e05bca6d8ae642d9f7a_51c47a20e94658a652843ed744178633 2024-11-20T17:21:55,284 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 51c47a20e94658a652843ed744178633/A of 51c47a20e94658a652843ed744178633 into a09085f4bfdd4e24a06c458c30777eaa(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:21:55,284 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 51c47a20e94658a652843ed744178633: 2024-11-20T17:21:55,284 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633., storeName=51c47a20e94658a652843ed744178633/A, priority=13, startTime=1732123314797; duration=0sec 2024-11-20T17:21:55,284 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:55,284 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51c47a20e94658a652843ed744178633:A 2024-11-20T17:21:55,285 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/122bf98868b949479062d0dd31cfe2c8, store: [table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:55,286 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/122bf98868b949479062d0dd31cfe2c8 is 175, key is test_row_0/A:col10/1732123314839/Put/seqid=0 2024-11-20T17:21:55,287 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/ef7579f8a2bf4df1818a31fb9f4638e1 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/ef7579f8a2bf4df1818a31fb9f4638e1 2024-11-20T17:21:55,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742054_1230 (size=39949) 2024-11-20T17:21:55,293 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 51c47a20e94658a652843ed744178633/C of 51c47a20e94658a652843ed744178633 into ef7579f8a2bf4df1818a31fb9f4638e1(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:21:55,293 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 51c47a20e94658a652843ed744178633: 2024-11-20T17:21:55,293 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633., storeName=51c47a20e94658a652843ed744178633/C, priority=13, startTime=1732123314797; duration=0sec 2024-11-20T17:21:55,293 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:55,293 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51c47a20e94658a652843ed744178633:C 2024-11-20T17:21:55,496 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:55,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123375495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:55,499 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:55,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123375497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:55,529 DEBUG [Thread-713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x14c16cd4 to 127.0.0.1:55266 2024-11-20T17:21:55,529 DEBUG [Thread-713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:21:55,529 DEBUG [Thread-711 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x78cafade to 127.0.0.1:55266 2024-11-20T17:21:55,529 DEBUG [Thread-711 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:21:55,530 DEBUG [Thread-715 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0341384e to 127.0.0.1:55266 2024-11-20T17:21:55,531 DEBUG [Thread-715 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:21:55,533 DEBUG [Thread-717 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x26b120d9 to 127.0.0.1:55266 2024-11-20T17:21:55,533 DEBUG [Thread-717 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:21:55,693 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=301, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/122bf98868b949479062d0dd31cfe2c8 2024-11-20T17:21:55,700 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/c789da37ed684a3aa7036eb1e076d677 is 50, key is test_row_0/B:col10/1732123314839/Put/seqid=0 2024-11-20T17:21:55,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742055_1231 (size=12301) 2024-11-20T17:21:55,998 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:55,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59624 deadline: 1732123375998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:56,002 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:21:56,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59564 deadline: 1732123376002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 2024-11-20T17:21:56,104 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=301 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/c789da37ed684a3aa7036eb1e076d677 2024-11-20T17:21:56,111 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/de1921b87ac749f4a2793e65f699d09b is 50, key is test_row_0/C:col10/1732123314839/Put/seqid=0 2024-11-20T17:21:56,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742056_1232 (size=12301) 2024-11-20T17:21:56,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T17:21:56,287 INFO [Thread-710 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 61 completed 2024-11-20T17:21:56,515 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=301 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/de1921b87ac749f4a2793e65f699d09b 2024-11-20T17:21:56,519 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/122bf98868b949479062d0dd31cfe2c8 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/122bf98868b949479062d0dd31cfe2c8 2024-11-20T17:21:56,523 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/122bf98868b949479062d0dd31cfe2c8, entries=200, sequenceid=301, filesize=39.0 K 2024-11-20T17:21:56,524 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/c789da37ed684a3aa7036eb1e076d677 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/c789da37ed684a3aa7036eb1e076d677 2024-11-20T17:21:56,527 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/c789da37ed684a3aa7036eb1e076d677, entries=150, sequenceid=301, filesize=12.0 K 2024-11-20T17:21:56,528 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/de1921b87ac749f4a2793e65f699d09b as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/de1921b87ac749f4a2793e65f699d09b 2024-11-20T17:21:56,531 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/de1921b87ac749f4a2793e65f699d09b, entries=150, sequenceid=301, filesize=12.0 K 2024-11-20T17:21:56,532 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 51c47a20e94658a652843ed744178633 in 1692ms, sequenceid=301, compaction requested=false 2024-11-20T17:21:56,532 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 51c47a20e94658a652843ed744178633: 2024-11-20T17:21:56,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 51c47a20e94658a652843ed744178633 2024-11-20T17:21:56,726 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 51c47a20e94658a652843ed744178633 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T17:21:56,727 DEBUG [Thread-706 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x46114993 to 127.0.0.1:55266 2024-11-20T17:21:56,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=A 2024-11-20T17:21:56,727 DEBUG [Thread-706 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:21:56,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:56,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=B 2024-11-20T17:21:56,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:56,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=C 2024-11-20T17:21:56,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:56,733 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e268ce9fbe8c41229f8501964872a556_51c47a20e94658a652843ed744178633 is 50, key is test_row_0/A:col10/1732123314886/Put/seqid=0 2024-11-20T17:21:56,733 DEBUG [Thread-702 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x491ea2ee to 127.0.0.1:55266 2024-11-20T17:21:56,733 DEBUG [Thread-702 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:21:56,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742057_1233 (size=12454) 2024-11-20T17:21:56,784 DEBUG [Thread-704 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x190853fc to 127.0.0.1:55266 2024-11-20T17:21:56,784 DEBUG [Thread-704 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:21:57,006 DEBUG [Thread-708 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2885d2d9 to 127.0.0.1:55266 2024-11-20T17:21:57,006 DEBUG [Thread-708 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:21:57,008 DEBUG [Thread-700 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2931c73e to 127.0.0.1:55266 2024-11-20T17:21:57,008 DEBUG [Thread-700 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:21:57,008 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-20T17:21:57,008 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 106 2024-11-20T17:21:57,008 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 14 2024-11-20T17:21:57,008 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 15 2024-11-20T17:21:57,008 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 16 2024-11-20T17:21:57,008 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 108 2024-11-20T17:21:57,008 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T17:21:57,008 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7278 2024-11-20T17:21:57,008 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7291 2024-11-20T17:21:57,008 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T17:21:57,008 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3097 2024-11-20T17:21:57,008 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9290 rows 2024-11-20T17:21:57,008 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3073 2024-11-20T17:21:57,008 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9218 rows 2024-11-20T17:21:57,008 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T17:21:57,008 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5d29de25 to 127.0.0.1:55266 2024-11-20T17:21:57,008 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:21:57,010 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-20T17:21:57,010 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T17:21:57,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, 
state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T17:21:57,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T17:21:57,014 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123317014"}]},"ts":"1732123317014"} 2024-11-20T17:21:57,015 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T17:21:57,018 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T17:21:57,018 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T17:21:57,020 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=51c47a20e94658a652843ed744178633, UNASSIGN}] 2024-11-20T17:21:57,020 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=51c47a20e94658a652843ed744178633, UNASSIGN 2024-11-20T17:21:57,021 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=51c47a20e94658a652843ed744178633, regionState=CLOSING, regionLocation=d514dc944523,40121,1732123262111 2024-11-20T17:21:57,022 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T17:21:57,022 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; CloseRegionProcedure 51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111}] 2024-11-20T17:21:57,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T17:21:57,137 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:57,141 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e268ce9fbe8c41229f8501964872a556_51c47a20e94658a652843ed744178633 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e268ce9fbe8c41229f8501964872a556_51c47a20e94658a652843ed744178633 2024-11-20T17:21:57,142 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/3e6a97ecbd924fc59465e5ddfe01f8c5, store: [table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:57,143 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/3e6a97ecbd924fc59465e5ddfe01f8c5 is 175, key is test_row_0/A:col10/1732123314886/Put/seqid=0 2024-11-20T17:21:57,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742058_1234 (size=31255) 2024-11-20T17:21:57,173 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:21:57,174 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(124): Close 51c47a20e94658a652843ed744178633 2024-11-20T17:21:57,174 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T17:21:57,174 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1681): Closing 51c47a20e94658a652843ed744178633, disabling compactions & flushes 2024-11-20T17:21:57,174 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:57,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T17:21:57,547 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=328, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/3e6a97ecbd924fc59465e5ddfe01f8c5 2024-11-20T17:21:57,555 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/2061c6b2a84a4db5857761b8358f6a5f is 50, key is test_row_0/B:col10/1732123314886/Put/seqid=0 2024-11-20T17:21:57,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742059_1235 (size=12301) 2024-11-20T17:21:57,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T17:21:57,959 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/2061c6b2a84a4db5857761b8358f6a5f 2024-11-20T17:21:57,966 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/3496881d471042b69bb4a7a0183f6cf0 is 50, key is test_row_0/C:col10/1732123314886/Put/seqid=0 2024-11-20T17:21:57,969 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742060_1236 (size=12301) 2024-11-20T17:21:58,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T17:21:58,370 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/3496881d471042b69bb4a7a0183f6cf0 2024-11-20T17:21:58,374 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/3e6a97ecbd924fc59465e5ddfe01f8c5 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/3e6a97ecbd924fc59465e5ddfe01f8c5 2024-11-20T17:21:58,377 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/3e6a97ecbd924fc59465e5ddfe01f8c5, entries=150, sequenceid=328, filesize=30.5 K 2024-11-20T17:21:58,378 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/2061c6b2a84a4db5857761b8358f6a5f as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/2061c6b2a84a4db5857761b8358f6a5f 2024-11-20T17:21:58,382 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/2061c6b2a84a4db5857761b8358f6a5f, entries=150, sequenceid=328, filesize=12.0 K 2024-11-20T17:21:58,382 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/3496881d471042b69bb4a7a0183f6cf0 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/3496881d471042b69bb4a7a0183f6cf0 2024-11-20T17:21:58,385 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/3496881d471042b69bb4a7a0183f6cf0, entries=150, sequenceid=328, filesize=12.0 K 2024-11-20T17:21:58,386 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=26.84 KB/27480 for 51c47a20e94658a652843ed744178633 in 1660ms, sequenceid=328, compaction requested=true 2024-11-20T17:21:58,386 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 51c47a20e94658a652843ed744178633: 2024-11-20T17:21:58,386 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store 51c47a20e94658a652843ed744178633:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:21:58,386 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:58,386 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. because compaction request was cancelled 2024-11-20T17:21:58,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:58,387 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51c47a20e94658a652843ed744178633:A 2024-11-20T17:21:58,387 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:21:58,387 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51c47a20e94658a652843ed744178633:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:21:58,387 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:58,387 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. because compaction request was cancelled 2024-11-20T17:21:58,387 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. after waiting 0 ms 2024-11-20T17:21:58,387 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51c47a20e94658a652843ed744178633:C, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:21:58,387 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51c47a20e94658a652843ed744178633:B 2024-11-20T17:21:58,387 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. because compaction request was cancelled 2024-11-20T17:21:58,387 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:21:58,387 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 
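The pid=63 DisableTableProcedure above, its CloseTableRegionsProcedure/TransitRegionStateProcedure/CloseRegionProcedure children, and the repeated "Checking to see if procedure is done pid=63" polling are what an ordinary Admin.disableTable() call looks like from the master's side during test teardown. A minimal client-side sketch of that call follows; it is illustrative only, not the AcidGuaranteesTestTool's actual teardown code, and the class name is made up. A disabled table can then be dropped with Admin.deleteTable().

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Submits a DisableTableProcedure on the master and blocks until it finishes;
      // the "Checking to see if procedure is done" lines appear to be this polling.
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);
      }
      // admin.deleteTable(table);  // typical next step once the table is disabled
    }
  }
}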
2024-11-20T17:21:58,387 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51c47a20e94658a652843ed744178633:C 2024-11-20T17:21:58,387 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(2837): Flushing 51c47a20e94658a652843ed744178633 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-20T17:21:58,387 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=A 2024-11-20T17:21:58,387 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:58,387 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=B 2024-11-20T17:21:58,387 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:58,387 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51c47a20e94658a652843ed744178633, store=C 2024-11-20T17:21:58,387 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:21:58,393 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112088b64124fbec483990337bc32968ec47_51c47a20e94658a652843ed744178633 is 50, key is test_row_1/A:col10/1732123317004/Put/seqid=0 2024-11-20T17:21:58,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742061_1237 (size=9914) 2024-11-20T17:21:58,799 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:21:58,802 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112088b64124fbec483990337bc32968ec47_51c47a20e94658a652843ed744178633 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112088b64124fbec483990337bc32968ec47_51c47a20e94658a652843ed744178633 2024-11-20T17:21:58,803 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/c2011d0149e64b2ba54dfc48aa9ce80d, store: [table=TestAcidGuarantees family=A region=51c47a20e94658a652843ed744178633] 2024-11-20T17:21:58,803 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/c2011d0149e64b2ba54dfc48aa9ce80d is 175, key is test_row_1/A:col10/1732123317004/Put/seqid=0 2024-11-20T17:21:58,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742062_1238 (size=22561) 2024-11-20T17:21:59,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T17:21:59,208 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=335, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/c2011d0149e64b2ba54dfc48aa9ce80d 2024-11-20T17:21:59,214 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/0386ebe4bad540e3a6c7e3d33e6e5714 is 50, key is test_row_1/B:col10/1732123317004/Put/seqid=0 2024-11-20T17:21:59,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742063_1239 (size=9857) 2024-11-20T17:21:59,618 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/0386ebe4bad540e3a6c7e3d33e6e5714 2024-11-20T17:21:59,624 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/c13ceeb2cc20451397b618e9efa6b335 is 50, key is test_row_1/C:col10/1732123317004/Put/seqid=0 2024-11-20T17:21:59,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742064_1240 (size=9857) 2024-11-20T17:22:00,029 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/c13ceeb2cc20451397b618e9efa6b335 2024-11-20T17:22:00,033 DEBUG 
[RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/A/c2011d0149e64b2ba54dfc48aa9ce80d as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/c2011d0149e64b2ba54dfc48aa9ce80d 2024-11-20T17:22:00,036 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/c2011d0149e64b2ba54dfc48aa9ce80d, entries=100, sequenceid=335, filesize=22.0 K 2024-11-20T17:22:00,037 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/B/0386ebe4bad540e3a6c7e3d33e6e5714 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/0386ebe4bad540e3a6c7e3d33e6e5714 2024-11-20T17:22:00,040 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/0386ebe4bad540e3a6c7e3d33e6e5714, entries=100, sequenceid=335, filesize=9.6 K 2024-11-20T17:22:00,041 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/.tmp/C/c13ceeb2cc20451397b618e9efa6b335 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/c13ceeb2cc20451397b618e9efa6b335 2024-11-20T17:22:00,044 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/c13ceeb2cc20451397b618e9efa6b335, entries=100, sequenceid=335, filesize=9.6 K 2024-11-20T17:22:00,044 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 51c47a20e94658a652843ed744178633 in 1657ms, sequenceid=335, compaction requested=true 2024-11-20T17:22:00,045 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/cd97f675758b403e8c8f7883f22a1ce4, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/8966c1ab1dc74484b595ad46c4105ea6, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/8e9c710d43cc479eba39858ce2f0c667, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/68a61c8e891043ca9d66c5bbcc1d3b9b, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/afe6ed6da53f4dc4ae8e4303a0930a77, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/bfcbd87c43b9440ba768eccded6c1e0c, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/991fb005a2814d9bbf47ab0b39be3540, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/f916b89cb02a429fb2571d46d59f431f, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/4a74b7432e5b4e33841b20a6b2760033, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/13d4ec428a36456095985fc660f76590, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/726720490825493e8282eea8623bcd22, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/a953148e34b244ce902866430e02c6d1, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/a461dedaa1754bbcb2db013013784349, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/e7fe240277d742babda9e27bb0bb5dd6, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/7a727d9e5d83461bba69e6df3022a920, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/2796ab3ed15344fe8bb40507864be574, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/584f06f216524e86930cc875586fc3d3, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/5b6b22dba90f4cdfae3e3bbfb84df483, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/255b958ce37d438b934d5a5084fa0d8e, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/5d86912a7cb94c4a9027ec9f296ae051] to archive 2024-11-20T17:22:00,046 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T17:22:00,047 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/cd97f675758b403e8c8f7883f22a1ce4 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/cd97f675758b403e8c8f7883f22a1ce4 2024-11-20T17:22:00,048 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/8966c1ab1dc74484b595ad46c4105ea6 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/8966c1ab1dc74484b595ad46c4105ea6 2024-11-20T17:22:00,049 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/8e9c710d43cc479eba39858ce2f0c667 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/8e9c710d43cc479eba39858ce2f0c667 2024-11-20T17:22:00,051 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/68a61c8e891043ca9d66c5bbcc1d3b9b to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/68a61c8e891043ca9d66c5bbcc1d3b9b 2024-11-20T17:22:00,052 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/afe6ed6da53f4dc4ae8e4303a0930a77 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/afe6ed6da53f4dc4ae8e4303a0930a77 2024-11-20T17:22:00,053 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/bfcbd87c43b9440ba768eccded6c1e0c to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/bfcbd87c43b9440ba768eccded6c1e0c 2024-11-20T17:22:00,054 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/991fb005a2814d9bbf47ab0b39be3540 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/991fb005a2814d9bbf47ab0b39be3540 2024-11-20T17:22:00,055 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/f916b89cb02a429fb2571d46d59f431f to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/f916b89cb02a429fb2571d46d59f431f 2024-11-20T17:22:00,056 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/4a74b7432e5b4e33841b20a6b2760033 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/4a74b7432e5b4e33841b20a6b2760033 2024-11-20T17:22:00,057 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/13d4ec428a36456095985fc660f76590 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/13d4ec428a36456095985fc660f76590 2024-11-20T17:22:00,058 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/726720490825493e8282eea8623bcd22 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/726720490825493e8282eea8623bcd22 2024-11-20T17:22:00,059 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/a953148e34b244ce902866430e02c6d1 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/a953148e34b244ce902866430e02c6d1 2024-11-20T17:22:00,060 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/a461dedaa1754bbcb2db013013784349 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/a461dedaa1754bbcb2db013013784349 2024-11-20T17:22:00,061 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/e7fe240277d742babda9e27bb0bb5dd6 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/e7fe240277d742babda9e27bb0bb5dd6 2024-11-20T17:22:00,062 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/7a727d9e5d83461bba69e6df3022a920 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/7a727d9e5d83461bba69e6df3022a920 2024-11-20T17:22:00,063 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/2796ab3ed15344fe8bb40507864be574 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/2796ab3ed15344fe8bb40507864be574 2024-11-20T17:22:00,064 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/584f06f216524e86930cc875586fc3d3 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/584f06f216524e86930cc875586fc3d3 2024-11-20T17:22:00,065 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/5b6b22dba90f4cdfae3e3bbfb84df483 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/5b6b22dba90f4cdfae3e3bbfb84df483 2024-11-20T17:22:00,066 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/255b958ce37d438b934d5a5084fa0d8e to 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/255b958ce37d438b934d5a5084fa0d8e 2024-11-20T17:22:00,067 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/5d86912a7cb94c4a9027ec9f296ae051 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/5d86912a7cb94c4a9027ec9f296ae051 2024-11-20T17:22:00,068 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/50b34e4e4231415c9919344c4dccb0b4, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/050152866c634ec68a2e794ce4d0b2c6, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/370aeb39058d4059afc23c2930502ef6, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/f2a3f9f2a8e948599628cc5478fe2d56, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/90091ff993db4ff4b8f58245ba434429, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/d88cf25c8c3a42129e5a8180d1666a20, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/46a59a32ab6b4323be02bf151e7bc879, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/a6f0940a06f142ee9f2878e516811b14, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/3d32826dbb47443ab558808412343d84, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/e96e7fb470034b85ae39c2562dc4df1d, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/c728f80159bc4bceba8a1ede3af75685, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/e389e372125c4a08849194da8337f4b0, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/4861ce088557453cbcec1ab52b9062f7, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/63e5ef2f9be74ff1b935fb226a4f52d7, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/1601363886fd4c72b0cca3edb007b9ca, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/85132f4c963047a2ba472166ac49d15d, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/751ccbdffb9a421f85745a71e9193e58, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/c0bcd032c7e646b799eb8bdc7dc0c7b4, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/50ad06a0b32c404fb024e1ea283ee8f7, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/2ab90df3676f4a79afefc9bb5a149e07] to archive 2024-11-20T17:22:00,068 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T17:22:00,070 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/50b34e4e4231415c9919344c4dccb0b4 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/50b34e4e4231415c9919344c4dccb0b4 2024-11-20T17:22:00,071 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/050152866c634ec68a2e794ce4d0b2c6 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/050152866c634ec68a2e794ce4d0b2c6 2024-11-20T17:22:00,072 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/370aeb39058d4059afc23c2930502ef6 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/370aeb39058d4059afc23c2930502ef6 2024-11-20T17:22:00,072 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/f2a3f9f2a8e948599628cc5478fe2d56 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/f2a3f9f2a8e948599628cc5478fe2d56 2024-11-20T17:22:00,073 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/90091ff993db4ff4b8f58245ba434429 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/90091ff993db4ff4b8f58245ba434429 2024-11-20T17:22:00,074 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/d88cf25c8c3a42129e5a8180d1666a20 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/d88cf25c8c3a42129e5a8180d1666a20 2024-11-20T17:22:00,075 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/46a59a32ab6b4323be02bf151e7bc879 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/46a59a32ab6b4323be02bf151e7bc879 2024-11-20T17:22:00,076 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/a6f0940a06f142ee9f2878e516811b14 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/a6f0940a06f142ee9f2878e516811b14 2024-11-20T17:22:00,077 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/3d32826dbb47443ab558808412343d84 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/3d32826dbb47443ab558808412343d84 2024-11-20T17:22:00,078 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/e96e7fb470034b85ae39c2562dc4df1d to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/e96e7fb470034b85ae39c2562dc4df1d 2024-11-20T17:22:00,079 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/c728f80159bc4bceba8a1ede3af75685 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/c728f80159bc4bceba8a1ede3af75685 2024-11-20T17:22:00,080 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/e389e372125c4a08849194da8337f4b0 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/e389e372125c4a08849194da8337f4b0 2024-11-20T17:22:00,081 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/4861ce088557453cbcec1ab52b9062f7 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/4861ce088557453cbcec1ab52b9062f7 2024-11-20T17:22:00,082 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/63e5ef2f9be74ff1b935fb226a4f52d7 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/63e5ef2f9be74ff1b935fb226a4f52d7 2024-11-20T17:22:00,083 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/1601363886fd4c72b0cca3edb007b9ca to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/1601363886fd4c72b0cca3edb007b9ca 2024-11-20T17:22:00,084 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/85132f4c963047a2ba472166ac49d15d to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/85132f4c963047a2ba472166ac49d15d 2024-11-20T17:22:00,085 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/751ccbdffb9a421f85745a71e9193e58 to 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/751ccbdffb9a421f85745a71e9193e58 2024-11-20T17:22:00,086 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/c0bcd032c7e646b799eb8bdc7dc0c7b4 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/c0bcd032c7e646b799eb8bdc7dc0c7b4 2024-11-20T17:22:00,087 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/50ad06a0b32c404fb024e1ea283ee8f7 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/50ad06a0b32c404fb024e1ea283ee8f7 2024-11-20T17:22:00,088 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/2ab90df3676f4a79afefc9bb5a149e07 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/2ab90df3676f4a79afefc9bb5a149e07 2024-11-20T17:22:00,089 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/47105e9ee0e74eb8bde444d62e66f67a, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/4d8fee83fb684961a5df56a294312b18, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/d86b815d5339429fa8e8396a30644009, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/11e8ac8373054dde8fc5ef1401594a2a, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/ef001bd4f4a441f3b060cd1b3911daed, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/65a5c740c060406799b2d0422b05f2ee, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/f4907b5aab06448a85730b76062da06b, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/40935847f77e4c59977cf38abb42eec6, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/26afdb17abe24757a3b4acf8a392d978, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/76a612499cee44ddb8d41bb0a36b7b81, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/32ce100e3d6849bf85fc79dbcceed496, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/bda04e23c3304651a91a8fa1a397e32f, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/ec587342e4a24c17840140fbb93bef81, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/4ae79dccfb504a4bb40c0f187a5e8518, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/32612e3fa42b4a17b89d90f6de0fc8b0, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/cd507fd1704747d0a34b0b2309ba6954, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/d34bd3f6a4054d6db336f541a1395886, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/d444ffd2acb048b8941c7a5d91754213, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/7fe19cc3c98a417dba619bb60b9d7422, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/57873de81c524ebe869b7f2dd1302155] to archive 2024-11-20T17:22:00,090 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
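The HFileArchiver entries above, and the column-family C entries that follow, all apply the same transformation: a compacted store file under the data root is re-homed at the identical relative path under the archive root. A minimal sketch of that mapping follows; the class and method names are invented for this illustration (they are not HBase API), and the hard-coded root is simply the test directory that appears in these entries.

/** Illustration only: mirrors a store-file path from the data root into the archive root. */
public final class ArchivePathSketch {
  // Root directory as it appears in the surrounding log entries; on a real cluster this
  // would come from the cluster's root-directory configuration rather than a constant.
  private static final String ROOT =
      "hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0";

  // data/default/<table>/<region>/<family>/<file> -> archive/data/default/<table>/<region>/<family>/<file>
  static String toArchivePath(String storeFile) {
    String relative = storeFile.substring((ROOT + "/data/").length());
    return ROOT + "/archive/data/" + relative;
  }

  public static void main(String[] args) {
    String src = ROOT
        + "/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/47105e9ee0e74eb8bde444d62e66f67a";
    // Prints the archive location recorded by the first column-family C entry that follows.
    System.out.println(toArchivePath(src));
  }
}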
2024-11-20T17:22:00,091 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/47105e9ee0e74eb8bde444d62e66f67a to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/47105e9ee0e74eb8bde444d62e66f67a 2024-11-20T17:22:00,092 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/4d8fee83fb684961a5df56a294312b18 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/4d8fee83fb684961a5df56a294312b18 2024-11-20T17:22:00,093 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/d86b815d5339429fa8e8396a30644009 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/d86b815d5339429fa8e8396a30644009 2024-11-20T17:22:00,094 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/11e8ac8373054dde8fc5ef1401594a2a to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/11e8ac8373054dde8fc5ef1401594a2a 2024-11-20T17:22:00,095 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/ef001bd4f4a441f3b060cd1b3911daed to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/ef001bd4f4a441f3b060cd1b3911daed 2024-11-20T17:22:00,096 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/65a5c740c060406799b2d0422b05f2ee to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/65a5c740c060406799b2d0422b05f2ee 2024-11-20T17:22:00,097 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/f4907b5aab06448a85730b76062da06b to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/f4907b5aab06448a85730b76062da06b 2024-11-20T17:22:00,098 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/40935847f77e4c59977cf38abb42eec6 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/40935847f77e4c59977cf38abb42eec6 2024-11-20T17:22:00,099 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/26afdb17abe24757a3b4acf8a392d978 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/26afdb17abe24757a3b4acf8a392d978 2024-11-20T17:22:00,099 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/76a612499cee44ddb8d41bb0a36b7b81 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/76a612499cee44ddb8d41bb0a36b7b81 2024-11-20T17:22:00,100 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/32ce100e3d6849bf85fc79dbcceed496 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/32ce100e3d6849bf85fc79dbcceed496 2024-11-20T17:22:00,102 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/bda04e23c3304651a91a8fa1a397e32f to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/bda04e23c3304651a91a8fa1a397e32f 2024-11-20T17:22:00,102 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/ec587342e4a24c17840140fbb93bef81 to 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/ec587342e4a24c17840140fbb93bef81 2024-11-20T17:22:00,103 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/4ae79dccfb504a4bb40c0f187a5e8518 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/4ae79dccfb504a4bb40c0f187a5e8518 2024-11-20T17:22:00,104 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/32612e3fa42b4a17b89d90f6de0fc8b0 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/32612e3fa42b4a17b89d90f6de0fc8b0 2024-11-20T17:22:00,105 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/cd507fd1704747d0a34b0b2309ba6954 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/cd507fd1704747d0a34b0b2309ba6954 2024-11-20T17:22:00,106 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/d34bd3f6a4054d6db336f541a1395886 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/d34bd3f6a4054d6db336f541a1395886 2024-11-20T17:22:00,107 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/d444ffd2acb048b8941c7a5d91754213 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/d444ffd2acb048b8941c7a5d91754213 2024-11-20T17:22:00,108 DEBUG [StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/7fe19cc3c98a417dba619bb60b9d7422 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/7fe19cc3c98a417dba619bb60b9d7422 2024-11-20T17:22:00,109 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/57873de81c524ebe869b7f2dd1302155 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/57873de81c524ebe869b7f2dd1302155 2024-11-20T17:22:00,113 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/recovered.edits/338.seqid, newMaxSeqId=338, maxSeqId=4 2024-11-20T17:22:00,114 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633. 2024-11-20T17:22:00,114 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1635): Region close journal for 51c47a20e94658a652843ed744178633: 2024-11-20T17:22:00,116 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(170): Closed 51c47a20e94658a652843ed744178633 2024-11-20T17:22:00,116 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=51c47a20e94658a652843ed744178633, regionState=CLOSED 2024-11-20T17:22:00,118 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-11-20T17:22:00,118 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; CloseRegionProcedure 51c47a20e94658a652843ed744178633, server=d514dc944523,40121,1732123262111 in 3.0950 sec 2024-11-20T17:22:00,119 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=65, resume processing ppid=64 2024-11-20T17:22:00,119 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, ppid=64, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=51c47a20e94658a652843ed744178633, UNASSIGN in 3.0980 sec 2024-11-20T17:22:00,121 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-11-20T17:22:00,121 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 3.1020 sec 2024-11-20T17:22:00,122 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123320122"}]},"ts":"1732123320122"} 2024-11-20T17:22:00,122 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T17:22:00,125 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T17:22:00,126 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 3.1140 sec 2024-11-20T17:22:00,490 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): 
NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T17:22:01,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T17:22:01,118 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 63 completed 2024-11-20T17:22:01,119 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T17:22:01,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=67, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:22:01,120 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=67, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:22:01,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-20T17:22:01,121 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=67, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:22:01,122 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633 2024-11-20T17:22:01,125 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A, FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B, FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C, FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/recovered.edits] 2024-11-20T17:22:01,127 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/122bf98868b949479062d0dd31cfe2c8 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/122bf98868b949479062d0dd31cfe2c8 2024-11-20T17:22:01,128 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/3e6a97ecbd924fc59465e5ddfe01f8c5 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/3e6a97ecbd924fc59465e5ddfe01f8c5 2024-11-20T17:22:01,130 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/a09085f4bfdd4e24a06c458c30777eaa to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/a09085f4bfdd4e24a06c458c30777eaa 2024-11-20T17:22:01,131 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/c2011d0149e64b2ba54dfc48aa9ce80d to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/A/c2011d0149e64b2ba54dfc48aa9ce80d 2024-11-20T17:22:01,133 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/0386ebe4bad540e3a6c7e3d33e6e5714 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/0386ebe4bad540e3a6c7e3d33e6e5714 2024-11-20T17:22:01,134 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/2061c6b2a84a4db5857761b8358f6a5f to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/2061c6b2a84a4db5857761b8358f6a5f 2024-11-20T17:22:01,135 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/a1d93c8017f5425e943d0551bfd99fab to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/a1d93c8017f5425e943d0551bfd99fab 2024-11-20T17:22:01,136 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/c789da37ed684a3aa7036eb1e076d677 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/B/c789da37ed684a3aa7036eb1e076d677 2024-11-20T17:22:01,139 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/3496881d471042b69bb4a7a0183f6cf0 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/3496881d471042b69bb4a7a0183f6cf0 2024-11-20T17:22:01,140 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/c13ceeb2cc20451397b618e9efa6b335 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/c13ceeb2cc20451397b618e9efa6b335 2024-11-20T17:22:01,141 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/de1921b87ac749f4a2793e65f699d09b to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/de1921b87ac749f4a2793e65f699d09b 2024-11-20T17:22:01,142 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/ef7579f8a2bf4df1818a31fb9f4638e1 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/C/ef7579f8a2bf4df1818a31fb9f4638e1 2024-11-20T17:22:01,145 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/recovered.edits/338.seqid to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633/recovered.edits/338.seqid 2024-11-20T17:22:01,145 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/51c47a20e94658a652843ed744178633 2024-11-20T17:22:01,145 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T17:22:01,146 DEBUG [PEWorker-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T17:22:01,146 DEBUG [PEWorker-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-20T17:22:01,150 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200f5d9744c87b4addb1e5dff6da8f1067_51c47a20e94658a652843ed744178633 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200f5d9744c87b4addb1e5dff6da8f1067_51c47a20e94658a652843ed744178633 2024-11-20T17:22:01,151 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120318c4212ac30474698ab1d0057a00bcc_51c47a20e94658a652843ed744178633 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120318c4212ac30474698ab1d0057a00bcc_51c47a20e94658a652843ed744178633 2024-11-20T17:22:01,152 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112048531c39242d4da8a6bad57cdc870593_51c47a20e94658a652843ed744178633 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112048531c39242d4da8a6bad57cdc870593_51c47a20e94658a652843ed744178633 2024-11-20T17:22:01,154 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112050d7edc126aa4d1d9e9c0991690df53c_51c47a20e94658a652843ed744178633 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112050d7edc126aa4d1d9e9c0991690df53c_51c47a20e94658a652843ed744178633 2024-11-20T17:22:01,155 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112052f9bc979f5249b28f2b8c669a3692e4_51c47a20e94658a652843ed744178633 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112052f9bc979f5249b28f2b8c669a3692e4_51c47a20e94658a652843ed744178633 2024-11-20T17:22:01,156 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411206dfd30535c284573bdf1924f362e13b7_51c47a20e94658a652843ed744178633 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411206dfd30535c284573bdf1924f362e13b7_51c47a20e94658a652843ed744178633 2024-11-20T17:22:01,157 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120809f3ca86eb24958a872dbee03cfe64e_51c47a20e94658a652843ed744178633 to 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120809f3ca86eb24958a872dbee03cfe64e_51c47a20e94658a652843ed744178633 2024-11-20T17:22:01,158 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120818c5b6b2b764846b0340b2d1fd8d703_51c47a20e94658a652843ed744178633 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120818c5b6b2b764846b0340b2d1fd8d703_51c47a20e94658a652843ed744178633 2024-11-20T17:22:01,159 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120889a03966da64c6daffc415e66aab84b_51c47a20e94658a652843ed744178633 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120889a03966da64c6daffc415e66aab84b_51c47a20e94658a652843ed744178633 2024-11-20T17:22:01,160 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112088b64124fbec483990337bc32968ec47_51c47a20e94658a652843ed744178633 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112088b64124fbec483990337bc32968ec47_51c47a20e94658a652843ed744178633 2024-11-20T17:22:01,161 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112089ad37147b7443b5bd09018deb616391_51c47a20e94658a652843ed744178633 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112089ad37147b7443b5bd09018deb616391_51c47a20e94658a652843ed744178633 2024-11-20T17:22:01,162 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b0db4ce7e5d94e05bca6d8ae642d9f7a_51c47a20e94658a652843ed744178633 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b0db4ce7e5d94e05bca6d8ae642d9f7a_51c47a20e94658a652843ed744178633 2024-11-20T17:22:01,162 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120baead337095b46f2bba998f858e5d43f_51c47a20e94658a652843ed744178633 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120baead337095b46f2bba998f858e5d43f_51c47a20e94658a652843ed744178633 2024-11-20T17:22:01,163 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120bc368e321f364ff4a0f6e26e09a4e390_51c47a20e94658a652843ed744178633 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120bc368e321f364ff4a0f6e26e09a4e390_51c47a20e94658a652843ed744178633 2024-11-20T17:22:01,164 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c5dd06a340fe4859aa55268c372dcaf3_51c47a20e94658a652843ed744178633 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c5dd06a340fe4859aa55268c372dcaf3_51c47a20e94658a652843ed744178633 2024-11-20T17:22:01,165 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e268ce9fbe8c41229f8501964872a556_51c47a20e94658a652843ed744178633 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e268ce9fbe8c41229f8501964872a556_51c47a20e94658a652843ed744178633 2024-11-20T17:22:01,166 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e9adbee55d2c44e9aa20dcb245225420_51c47a20e94658a652843ed744178633 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e9adbee55d2c44e9aa20dcb245225420_51c47a20e94658a652843ed744178633 2024-11-20T17:22:01,168 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120ff5a72b8fceb4a8791f351a6989f16b1_51c47a20e94658a652843ed744178633 to 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120ff5a72b8fceb4a8791f351a6989f16b1_51c47a20e94658a652843ed744178633 2024-11-20T17:22:01,168 DEBUG [PEWorker-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T17:22:01,170 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=67, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:22:01,173 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T17:22:01,175 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T17:22:01,176 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=67, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:22:01,176 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T17:22:01,176 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732123321176"}]},"ts":"9223372036854775807"} 2024-11-20T17:22:01,178 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T17:22:01,178 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 51c47a20e94658a652843ed744178633, NAME => 'TestAcidGuarantees,,1732123292402.51c47a20e94658a652843ed744178633.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T17:22:01,178 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
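The DISABLE operation recorded earlier (procId 63) and the DELETE that completes just below (procId 67) are issued by the client through the HBase Admin API. A minimal sketch of the equivalent client-side calls follows; it assumes default client configuration and is not taken from the test's own source.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch only: client-side disable/delete of the test table.
public final class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      admin.disableTable(table); // corresponds to the DISABLE operation (procId 63) logged earlier
      admin.deleteTable(table);  // corresponds to the DELETE operation (procId 67) logged just below
    }
  }
}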
2024-11-20T17:22:01,178 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732123321178"}]},"ts":"9223372036854775807"} 2024-11-20T17:22:01,180 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T17:22:01,183 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=67, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:22:01,184 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 64 msec 2024-11-20T17:22:01,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-20T17:22:01,222 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 67 completed 2024-11-20T17:22:01,233 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobMixedAtomicity Thread=242 (was 241) Potentially hanging thread: hconnection-0x5602a74-shared-pool-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x5602a74-shared-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1161993582_22 at /127.0.0.1:52848 [Waiting for operation #1052] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_227606640_22 at /127.0.0.1:60030 [Waiting for operation #714] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1161993582_22 at /127.0.0.1:38390 [Waiting for operation #1039] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x5602a74-shared-pool-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_227606640_22 at /127.0.0.1:60038 [Waiting for operation #709] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0b2b9d67-2b66-a3f2-eea9-49157503e4e9/cluster_0914e0c7-bed1-0a2a-84e6-4611fbce9121/dfs/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0b2b9d67-2b66-a3f2-eea9-49157503e4e9/cluster_0914e0c7-bed1-0a2a-84e6-4611fbce9121/dfs/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x5602a74-shared-pool-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=464 (was 461) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=314 (was 239) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6224 (was 6335) 2024-11-20T17:22:01,243 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testGetAtomicity Thread=242, OpenFileDescriptor=464, MaxFileDescriptor=1048576, SystemLoadAverage=314, ProcessCount=11, AvailableMemoryMB=6224 2024-11-20T17:22:01,244 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
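The TableDescriptorChecker warning above fires because the effective memstore flush size for the table being created is 131072 bytes (128 KB), far below the 128 MB default, so the master predicts very frequent flushing. The test keeps it small on purpose to exercise flushes; a hedged sketch of the site-level knob the warning names (whether the test sets it this way or via the table descriptor is not shown in this log):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class FlushSizeConfigSketch {
  // Sets the per-region memstore flush threshold named in the warning
  // ("hbase.hregion.memstore.flush.size") to 128 KB. Values this small cause
  // the master to log the MEMSTORE_FLUSHSIZE sanity warning seen above.
  static Configuration withSmallFlushSize() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
    return conf;
  }
}
```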
2024-11-20T17:22:01,245 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T17:22:01,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=68, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T17:22:01,246 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T17:22:01,246 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:22:01,246 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 68 2024-11-20T17:22:01,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-11-20T17:22:01,247 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T17:22:01,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742065_1241 (size=960) 2024-11-20T17:22:01,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-11-20T17:22:01,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-11-20T17:22:01,655 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0 2024-11-20T17:22:01,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742066_1242 (size=53) 2024-11-20T17:22:01,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-11-20T17:22:01,873 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-20T17:22:02,063 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:22:02,063 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 3030edb9ba565d9149afc5328873a8ef, disabling compactions & flushes 2024-11-20T17:22:02,063 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:02,063 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:02,063 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. after waiting 0 ms 2024-11-20T17:22:02,063 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:02,063 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 
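The create request logged above defines three column families A, B and C, each with VERSIONS => '1' and a 64 KB block size, plus the table attribute 'hbase.hregion.compacting.memstore.type' => 'BASIC' that selects the basic in-memory compaction policy for this test. A minimal client-side sketch of building an equivalent descriptor (the names mirror the log; the exact builder calls used by the test itself are an assumption):

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateAcidTableSketch {
  static TableDescriptor descriptor() {
    TableDescriptorBuilder builder = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        // Table attribute seen in the log: BASIC in-memory compaction.
        .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
    for (String family : new String[] {"A", "B", "C"}) {
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes(family))
          .setMaxVersions(1)       // VERSIONS => '1'
          .setBlocksize(64 * 1024) // BLOCKSIZE => '65536'
          .build();
      builder.setColumnFamily(cf);
    }
    return builder.build();
  }

  static void create(Admin admin) throws java.io.IOException {
    // The master stores a CreateTableProcedure for this request (pid=68 above).
    admin.createTable(descriptor());
  }
}
```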
2024-11-20T17:22:02,063 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:02,064 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T17:22:02,064 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732123322064"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732123322064"}]},"ts":"1732123322064"} 2024-11-20T17:22:02,065 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T17:22:02,066 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T17:22:02,066 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123322066"}]},"ts":"1732123322066"} 2024-11-20T17:22:02,067 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T17:22:02,073 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3030edb9ba565d9149afc5328873a8ef, ASSIGN}] 2024-11-20T17:22:02,074 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3030edb9ba565d9149afc5328873a8ef, ASSIGN 2024-11-20T17:22:02,074 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=3030edb9ba565d9149afc5328873a8ef, ASSIGN; state=OFFLINE, location=d514dc944523,40121,1732123262111; forceNewPlan=false, retain=false 2024-11-20T17:22:02,225 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=69 updating hbase:meta row=3030edb9ba565d9149afc5328873a8ef, regionState=OPENING, regionLocation=d514dc944523,40121,1732123262111 2024-11-20T17:22:02,226 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; OpenRegionProcedure 3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111}] 2024-11-20T17:22:02,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-11-20T17:22:02,378 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:02,381 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 
2024-11-20T17:22:02,381 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7285): Opening region: {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} 2024-11-20T17:22:02,381 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 3030edb9ba565d9149afc5328873a8ef 2024-11-20T17:22:02,381 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:22:02,381 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7327): checking encryption for 3030edb9ba565d9149afc5328873a8ef 2024-11-20T17:22:02,382 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7330): checking classloading for 3030edb9ba565d9149afc5328873a8ef 2024-11-20T17:22:02,383 INFO [StoreOpener-3030edb9ba565d9149afc5328873a8ef-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 3030edb9ba565d9149afc5328873a8ef 2024-11-20T17:22:02,384 INFO [StoreOpener-3030edb9ba565d9149afc5328873a8ef-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:22:02,384 INFO [StoreOpener-3030edb9ba565d9149afc5328873a8ef-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3030edb9ba565d9149afc5328873a8ef columnFamilyName A 2024-11-20T17:22:02,384 DEBUG [StoreOpener-3030edb9ba565d9149afc5328873a8ef-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:22:02,385 INFO [StoreOpener-3030edb9ba565d9149afc5328873a8ef-1 {}] regionserver.HStore(327): Store=3030edb9ba565d9149afc5328873a8ef/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:22:02,385 INFO [StoreOpener-3030edb9ba565d9149afc5328873a8ef-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 3030edb9ba565d9149afc5328873a8ef 2024-11-20T17:22:02,386 INFO [StoreOpener-3030edb9ba565d9149afc5328873a8ef-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:22:02,386 INFO [StoreOpener-3030edb9ba565d9149afc5328873a8ef-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3030edb9ba565d9149afc5328873a8ef columnFamilyName B 2024-11-20T17:22:02,386 DEBUG [StoreOpener-3030edb9ba565d9149afc5328873a8ef-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:22:02,386 INFO [StoreOpener-3030edb9ba565d9149afc5328873a8ef-1 {}] regionserver.HStore(327): Store=3030edb9ba565d9149afc5328873a8ef/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:22:02,387 INFO [StoreOpener-3030edb9ba565d9149afc5328873a8ef-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 3030edb9ba565d9149afc5328873a8ef 2024-11-20T17:22:02,387 INFO [StoreOpener-3030edb9ba565d9149afc5328873a8ef-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:22:02,388 INFO [StoreOpener-3030edb9ba565d9149afc5328873a8ef-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3030edb9ba565d9149afc5328873a8ef columnFamilyName C 2024-11-20T17:22:02,388 DEBUG [StoreOpener-3030edb9ba565d9149afc5328873a8ef-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:22:02,388 INFO [StoreOpener-3030edb9ba565d9149afc5328873a8ef-1 {}] regionserver.HStore(327): Store=3030edb9ba565d9149afc5328873a8ef/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:22:02,388 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:02,389 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef 2024-11-20T17:22:02,389 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef 2024-11-20T17:22:02,390 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T17:22:02,391 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1085): writing seq id for 3030edb9ba565d9149afc5328873a8ef 2024-11-20T17:22:02,393 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T17:22:02,394 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1102): Opened 3030edb9ba565d9149afc5328873a8ef; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74039910, jitterRate=0.10328063368797302}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T17:22:02,394 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1001): Region open journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:02,395 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef., pid=70, masterSystemTime=1732123322378 2024-11-20T17:22:02,396 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:02,396 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 
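Once the region opens with its CompactingMemStore-backed stores and reports back to the master, a client can confirm the table is being served. A small illustrative sketch of that check, assuming an already-open Connection (not part of the test code):

```java
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public final class VerifyTableOpenSketch {
  // isTableAvailable() checks that all regions are assigned, and the RegionLocator
  // reports where each region landed (the log above shows the single region
  // opening on d514dc944523,40121).
  static void verify(Connection conn) throws IOException {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Admin admin = conn.getAdmin();
         RegionLocator locator = conn.getRegionLocator(table)) {
      boolean available = admin.isTableAvailable(table);
      List<HRegionLocation> regions = locator.getAllRegionLocations();
      System.out.println("available=" + available + ", regions=" + regions.size());
    }
  }
}
```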
2024-11-20T17:22:02,397 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=69 updating hbase:meta row=3030edb9ba565d9149afc5328873a8ef, regionState=OPEN, openSeqNum=2, regionLocation=d514dc944523,40121,1732123262111 2024-11-20T17:22:02,399 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-11-20T17:22:02,399 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; OpenRegionProcedure 3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 in 172 msec 2024-11-20T17:22:02,400 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=69, resume processing ppid=68 2024-11-20T17:22:02,400 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, ppid=68, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=3030edb9ba565d9149afc5328873a8ef, ASSIGN in 326 msec 2024-11-20T17:22:02,401 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T17:22:02,401 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123322401"}]},"ts":"1732123322401"} 2024-11-20T17:22:02,402 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T17:22:02,405 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T17:22:02,406 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1600 sec 2024-11-20T17:22:03,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-11-20T17:22:03,351 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 68 completed 2024-11-20T17:22:03,353 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3a569490 to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3c1ac389 2024-11-20T17:22:03,357 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44645c55, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:22:03,358 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:22:03,360 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40072, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:22:03,361 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T17:22:03,362 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51776, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T17:22:03,364 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6862e3ce to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@28e73c0 2024-11-20T17:22:03,368 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64ee0130, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:22:03,370 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0d296fed to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7c480dfb 2024-11-20T17:22:03,374 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@683b64c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:22:03,375 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x08d0caa5 to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@34cb3991 2024-11-20T17:22:03,379 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e55eb7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:22:03,380 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x43f04e0e to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2e9ae050 2024-11-20T17:22:03,384 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a703d2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:22:03,385 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x560ec309 to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2fef31f8 2024-11-20T17:22:03,389 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14ed1e44, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:22:03,390 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0eb04aeb to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72537a47 2024-11-20T17:22:03,394 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@88aa519, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:22:03,395 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6a0e9c8f to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@36642cb 2024-11-20T17:22:03,399 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e998dd3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:22:03,400 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0d68f787 to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3c299cfb 2024-11-20T17:22:03,403 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e4c79b8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:22:03,404 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x10e6bf6a to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@605827c9 2024-11-20T17:22:03,408 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d1403c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:22:03,409 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1730a60f to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3677bd4f 2024-11-20T17:22:03,412 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3bf0ba59, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:22:03,415 DEBUG [hconnection-0x7bc8155-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:22:03,415 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:22:03,416 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40078, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:22:03,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees 2024-11-20T17:22:03,417 DEBUG [hconnection-0x69b10c1c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 
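The "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" entry above shows the master accepting a table flush request and storing FlushTableProcedure as pid=71, which is then fanned out to per-region flush subprocedures. From the client this corresponds to a single Admin flush call; a minimal sketch:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public final class FlushSketch {
  // Asks the master to flush all memstores of the table; the master drives this
  // through the flush-table procedure logged above (pid=71).
  static void flushTable(Admin admin) throws java.io.IOException {
    admin.flush(TableName.valueOf("TestAcidGuarantees"));
  }
}
```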
2024-11-20T17:22:03,418 DEBUG [hconnection-0x2646fbcd-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:22:03,418 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:22:03,418 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40088, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:22:03,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-20T17:22:03,419 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:22:03,419 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:22:03,419 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40096, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:22:03,422 DEBUG [hconnection-0x1563a286-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:22:03,422 DEBUG [hconnection-0x142dd5c8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:22:03,422 DEBUG [hconnection-0x13a547a1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:22:03,423 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40104, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:22:03,423 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40102, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:22:03,423 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40110, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:22:03,423 DEBUG [hconnection-0x371f42d5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:22:03,424 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40122, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:22:03,425 DEBUG [hconnection-0x1f6aa33a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:22:03,425 DEBUG [hconnection-0x7e41a7db-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:22:03,426 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40134, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=ClientService 2024-11-20T17:22:03,426 DEBUG [hconnection-0x5af7f616-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:22:03,426 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40140, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:22:03,427 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40156, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:22:03,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 3030edb9ba565d9149afc5328873a8ef 2024-11-20T17:22:03,435 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3030edb9ba565d9149afc5328873a8ef 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T17:22:03,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=A 2024-11-20T17:22:03,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:03,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=B 2024-11-20T17:22:03,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:03,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=C 2024-11-20T17:22:03,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:03,464 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:03,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123383459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:03,465 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/bfe15609564444eba1c52b1cc8aac1ed is 50, key is test_row_0/A:col10/1732123323433/Put/seqid=0 2024-11-20T17:22:03,465 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:03,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123383462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:03,465 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:03,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123383462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:03,466 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:03,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123383462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:03,467 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:03,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123383463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:03,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742067_1243 (size=12001) 2024-11-20T17:22:03,504 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/bfe15609564444eba1c52b1cc8aac1ed 2024-11-20T17:22:03,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-20T17:22:03,537 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/1680a662e84d45afa1626e914f858543 is 50, key is test_row_0/B:col10/1732123323433/Put/seqid=0 2024-11-20T17:22:03,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742068_1244 (size=12001) 2024-11-20T17:22:03,547 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/1680a662e84d45afa1626e914f858543 2024-11-20T17:22:03,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:03,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123383565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:03,574 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:03,575 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:03,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123383574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:03,575 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-20T17:22:03,575 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:03,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123383575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:03,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:03,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. as already flushing 2024-11-20T17:22:03,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:03,576 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:03,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123383575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:03,576 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:03,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:03,576 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:03,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123383575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:03,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:03,583 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/dbb7158ba5e7465f98864423268575a4 is 50, key is test_row_0/C:col10/1732123323433/Put/seqid=0 2024-11-20T17:22:03,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742069_1245 (size=12001) 2024-11-20T17:22:03,588 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/dbb7158ba5e7465f98864423268575a4 2024-11-20T17:22:03,593 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/bfe15609564444eba1c52b1cc8aac1ed as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/bfe15609564444eba1c52b1cc8aac1ed 2024-11-20T17:22:03,599 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/bfe15609564444eba1c52b1cc8aac1ed, entries=150, sequenceid=14, filesize=11.7 K 2024-11-20T17:22:03,601 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/1680a662e84d45afa1626e914f858543 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/1680a662e84d45afa1626e914f858543 2024-11-20T17:22:03,605 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/1680a662e84d45afa1626e914f858543, entries=150, sequenceid=14, filesize=11.7 K 2024-11-20T17:22:03,606 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/dbb7158ba5e7465f98864423268575a4 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/dbb7158ba5e7465f98864423268575a4 2024-11-20T17:22:03,621 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/dbb7158ba5e7465f98864423268575a4, entries=150, sequenceid=14, filesize=11.7 K 2024-11-20T17:22:03,622 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 3030edb9ba565d9149afc5328873a8ef in 187ms, sequenceid=14, compaction requested=false 2024-11-20T17:22:03,622 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:03,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-20T17:22:03,728 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:03,729 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-20T17:22:03,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 
2024-11-20T17:22:03,729 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing 3030edb9ba565d9149afc5328873a8ef 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T17:22:03,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=A 2024-11-20T17:22:03,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:03,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=B 2024-11-20T17:22:03,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:03,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=C 2024-11-20T17:22:03,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:03,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/7cdd7bfbe0ff44b5845419e89bdca5b6 is 50, key is test_row_0/A:col10/1732123323461/Put/seqid=0 2024-11-20T17:22:03,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742070_1246 (size=12001) 2024-11-20T17:22:03,747 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/7cdd7bfbe0ff44b5845419e89bdca5b6 2024-11-20T17:22:03,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/17ca120437704d6db0537dc2206259ec is 50, key is test_row_0/B:col10/1732123323461/Put/seqid=0 2024-11-20T17:22:03,770 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. as already flushing 2024-11-20T17:22:03,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 3030edb9ba565d9149afc5328873a8ef 2024-11-20T17:22:03,782 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:03,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123383780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:03,783 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:03,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123383780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:03,784 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:03,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123383781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:03,784 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:03,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123383781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:03,785 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:03,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123383782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:03,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742071_1247 (size=12001) 2024-11-20T17:22:03,885 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:03,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123383884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:03,885 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:03,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123383884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:03,886 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:03,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123383885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:03,887 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:03,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123383886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:03,888 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:03,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123383886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:04,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-20T17:22:04,088 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:04,088 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:04,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123384087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:04,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123384086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:04,089 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:04,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123384088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:04,090 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:04,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123384088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:04,090 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:04,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123384090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:04,200 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/17ca120437704d6db0537dc2206259ec 2024-11-20T17:22:04,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/02da981306b6494c8eac8de2237c7ec6 is 50, key is test_row_0/C:col10/1732123323461/Put/seqid=0 2024-11-20T17:22:04,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742072_1248 (size=12001) 2024-11-20T17:22:04,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:04,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123384389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:04,392 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:04,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123384391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:04,393 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:04,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123384392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:04,393 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:04,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123384392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:04,395 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:04,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123384393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:04,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-20T17:22:04,615 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/02da981306b6494c8eac8de2237c7ec6 2024-11-20T17:22:04,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/7cdd7bfbe0ff44b5845419e89bdca5b6 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/7cdd7bfbe0ff44b5845419e89bdca5b6 2024-11-20T17:22:04,623 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/7cdd7bfbe0ff44b5845419e89bdca5b6, entries=150, sequenceid=38, filesize=11.7 K 2024-11-20T17:22:04,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/17ca120437704d6db0537dc2206259ec as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/17ca120437704d6db0537dc2206259ec 2024-11-20T17:22:04,629 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/17ca120437704d6db0537dc2206259ec, entries=150, sequenceid=38, filesize=11.7 K 2024-11-20T17:22:04,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/02da981306b6494c8eac8de2237c7ec6 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/02da981306b6494c8eac8de2237c7ec6 2024-11-20T17:22:04,635 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/02da981306b6494c8eac8de2237c7ec6, entries=150, sequenceid=38, filesize=11.7 K 2024-11-20T17:22:04,636 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 3030edb9ba565d9149afc5328873a8ef in 907ms, sequenceid=38, compaction requested=false 2024-11-20T17:22:04,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:04,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 
2024-11-20T17:22:04,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-11-20T17:22:04,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-11-20T17:22:04,639 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-11-20T17:22:04,639 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2180 sec 2024-11-20T17:22:04,641 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees in 1.2250 sec 2024-11-20T17:22:04,896 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3030edb9ba565d9149afc5328873a8ef 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T17:22:04,897 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=A 2024-11-20T17:22:04,897 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:04,897 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=B 2024-11-20T17:22:04,897 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:04,897 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=C 2024-11-20T17:22:04,897 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:04,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 3030edb9ba565d9149afc5328873a8ef 2024-11-20T17:22:04,901 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/85a16885936f42a8904ce63be9266992 is 50, key is test_row_0/A:col10/1732123323776/Put/seqid=0 2024-11-20T17:22:04,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742073_1249 (size=12001) 2024-11-20T17:22:04,919 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:04,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123384916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:04,920 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:04,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123384916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:04,920 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:04,920 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:04,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123384917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:04,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123384917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:04,920 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:04,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123384918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:05,022 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:05,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123385021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:05,022 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:05,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123385021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:05,023 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:05,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123385021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:05,023 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:05,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123385021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:05,023 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:05,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123385021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:05,225 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:05,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123385223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:05,226 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:05,226 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:05,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123385224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:05,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123385224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:05,226 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:05,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123385224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:05,227 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:05,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123385225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:05,310 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/85a16885936f42a8904ce63be9266992 2024-11-20T17:22:05,318 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/0e0ddf8140ee4827a793853ed1c9dca8 is 50, key is test_row_0/B:col10/1732123323776/Put/seqid=0 2024-11-20T17:22:05,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742074_1250 (size=12001) 2024-11-20T17:22:05,325 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/0e0ddf8140ee4827a793853ed1c9dca8 2024-11-20T17:22:05,333 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/00b187fd0f0841858f0ddcc5e3d183d5 is 50, key is test_row_0/C:col10/1732123323776/Put/seqid=0 2024-11-20T17:22:05,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742075_1251 (size=12001) 2024-11-20T17:22:05,338 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/00b187fd0f0841858f0ddcc5e3d183d5 2024-11-20T17:22:05,342 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/85a16885936f42a8904ce63be9266992 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/85a16885936f42a8904ce63be9266992 2024-11-20T17:22:05,345 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/85a16885936f42a8904ce63be9266992, entries=150, sequenceid=51, filesize=11.7 K 2024-11-20T17:22:05,346 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/0e0ddf8140ee4827a793853ed1c9dca8 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/0e0ddf8140ee4827a793853ed1c9dca8 2024-11-20T17:22:05,350 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/0e0ddf8140ee4827a793853ed1c9dca8, entries=150, sequenceid=51, filesize=11.7 K 2024-11-20T17:22:05,351 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/00b187fd0f0841858f0ddcc5e3d183d5 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/00b187fd0f0841858f0ddcc5e3d183d5 2024-11-20T17:22:05,355 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/00b187fd0f0841858f0ddcc5e3d183d5, entries=150, sequenceid=51, filesize=11.7 K 2024-11-20T17:22:05,356 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 3030edb9ba565d9149afc5328873a8ef in 460ms, sequenceid=51, compaction requested=true 2024-11-20T17:22:05,356 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:05,356 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3030edb9ba565d9149afc5328873a8ef:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:22:05,356 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 
2024-11-20T17:22:05,356 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3030edb9ba565d9149afc5328873a8ef:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:22:05,356 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:05,356 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:22:05,356 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3030edb9ba565d9149afc5328873a8ef:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:22:05,356 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:22:05,356 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:22:05,357 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:22:05,357 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:22:05,357 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 3030edb9ba565d9149afc5328873a8ef/B is initiating minor compaction (all files) 2024-11-20T17:22:05,357 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): 3030edb9ba565d9149afc5328873a8ef/A is initiating minor compaction (all files) 2024-11-20T17:22:05,357 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3030edb9ba565d9149afc5328873a8ef/B in TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:05,357 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3030edb9ba565d9149afc5328873a8ef/A in TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 
2024-11-20T17:22:05,358 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/1680a662e84d45afa1626e914f858543, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/17ca120437704d6db0537dc2206259ec, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/0e0ddf8140ee4827a793853ed1c9dca8] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp, totalSize=35.2 K 2024-11-20T17:22:05,358 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/bfe15609564444eba1c52b1cc8aac1ed, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/7cdd7bfbe0ff44b5845419e89bdca5b6, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/85a16885936f42a8904ce63be9266992] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp, totalSize=35.2 K 2024-11-20T17:22:05,358 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 1680a662e84d45afa1626e914f858543, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1732123323431 2024-11-20T17:22:05,358 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting bfe15609564444eba1c52b1cc8aac1ed, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1732123323431 2024-11-20T17:22:05,359 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 17ca120437704d6db0537dc2206259ec, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732123323459 2024-11-20T17:22:05,359 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7cdd7bfbe0ff44b5845419e89bdca5b6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732123323459 2024-11-20T17:22:05,359 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 0e0ddf8140ee4827a793853ed1c9dca8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732123323776 2024-11-20T17:22:05,359 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 85a16885936f42a8904ce63be9266992, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732123323776 2024-11-20T17:22:05,374 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3030edb9ba565d9149afc5328873a8ef#B#compaction#208 average throughput is 6.55 MB/second, slept 0 time(s) and total 
slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:22:05,375 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/1a59e34846874f68a876e51266d1ae98 is 50, key is test_row_0/B:col10/1732123323776/Put/seqid=0 2024-11-20T17:22:05,378 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3030edb9ba565d9149afc5328873a8ef#A#compaction#209 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:22:05,378 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/000e494816d64c35b8ab85b711051b74 is 50, key is test_row_0/A:col10/1732123323776/Put/seqid=0 2024-11-20T17:22:05,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742076_1252 (size=12104) 2024-11-20T17:22:05,391 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/1a59e34846874f68a876e51266d1ae98 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/1a59e34846874f68a876e51266d1ae98 2024-11-20T17:22:05,396 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3030edb9ba565d9149afc5328873a8ef/B of 3030edb9ba565d9149afc5328873a8ef into 1a59e34846874f68a876e51266d1ae98(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:22:05,397 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:05,397 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef., storeName=3030edb9ba565d9149afc5328873a8ef/B, priority=13, startTime=1732123325356; duration=0sec 2024-11-20T17:22:05,397 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:22:05,397 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3030edb9ba565d9149afc5328873a8ef:B 2024-11-20T17:22:05,397 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:22:05,398 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:22:05,398 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 3030edb9ba565d9149afc5328873a8ef/C is initiating minor compaction (all files) 2024-11-20T17:22:05,399 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3030edb9ba565d9149afc5328873a8ef/C in TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:05,400 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/dbb7158ba5e7465f98864423268575a4, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/02da981306b6494c8eac8de2237c7ec6, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/00b187fd0f0841858f0ddcc5e3d183d5] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp, totalSize=35.2 K 2024-11-20T17:22:05,401 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting dbb7158ba5e7465f98864423268575a4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1732123323431 2024-11-20T17:22:05,401 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 02da981306b6494c8eac8de2237c7ec6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732123323459 2024-11-20T17:22:05,401 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 00b187fd0f0841858f0ddcc5e3d183d5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732123323776 2024-11-20T17:22:05,408 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
3030edb9ba565d9149afc5328873a8ef#C#compaction#210 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:22:05,409 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/d9dde800cae241d1bf417946e32a371d is 50, key is test_row_0/C:col10/1732123323776/Put/seqid=0 2024-11-20T17:22:05,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742077_1253 (size=12104) 2024-11-20T17:22:05,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742078_1254 (size=12104) 2024-11-20T17:22:05,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-20T17:22:05,523 INFO [Thread-1128 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-11-20T17:22:05,524 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:22:05,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees 2024-11-20T17:22:05,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T17:22:05,526 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:22:05,527 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:22:05,527 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:22:05,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 3030edb9ba565d9149afc5328873a8ef 2024-11-20T17:22:05,529 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3030edb9ba565d9149afc5328873a8ef 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T17:22:05,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=A 2024-11-20T17:22:05,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:05,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=B 2024-11-20T17:22:05,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new 
segment=null 2024-11-20T17:22:05,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=C 2024-11-20T17:22:05,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:05,535 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/61cfe4e8b51d424385b47f643d405430 is 50, key is test_row_0/A:col10/1732123325529/Put/seqid=0 2024-11-20T17:22:05,537 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:05,537 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:05,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123385536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:05,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123385535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:05,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742079_1255 (size=12001) 2024-11-20T17:22:05,539 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:05,539 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/61cfe4e8b51d424385b47f643d405430 2024-11-20T17:22:05,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123385536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:05,540 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:05,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123385537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:05,540 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:05,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123385537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:05,547 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/c95e944d9edc413f9ca5f40b7dd8d7c9 is 50, key is test_row_0/B:col10/1732123325529/Put/seqid=0 2024-11-20T17:22:05,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742080_1256 (size=12001) 2024-11-20T17:22:05,558 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/c95e944d9edc413f9ca5f40b7dd8d7c9 2024-11-20T17:22:05,565 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/0b3b519a7e0d467d810a7e6f2447be13 is 50, key is test_row_0/C:col10/1732123325529/Put/seqid=0 2024-11-20T17:22:05,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742081_1257 (size=12001) 2024-11-20T17:22:05,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T17:22:05,635 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T17:22:05,640 WARN 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:05,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123385638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:05,640 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:05,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123385638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:05,643 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:05,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123385640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:05,643 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:05,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123385641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:05,643 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:05,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123385641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:05,680 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:05,681 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-20T17:22:05,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:05,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. as already flushing 2024-11-20T17:22:05,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:05,681 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
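The RegionTooBusyException warnings above are back-pressure: once a region's memstore grows past its blocking limit, the region server rejects further mutations until the in-flight flush drains it, and the client is expected to retry. The reported limit of 512.0 K is the configured memstore flush size multiplied by the blocking multiplier. The sketch below only reproduces that arithmetic with the standard configuration keys; the 128 KB flush size is an assumption about this test's setup (chosen because 128 KB times the default multiplier of 4 gives the 512 KB seen in the log), not a value read from the test code.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class MemStoreBlockingLimit {
    public static void main(String[] args) {
      Configuration conf = HBaseConfiguration.create();
      // Assumed test setting: a deliberately small flush size (the stock default is 128 MB).
      conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
      long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
      // Blocking multiplier; 4 is the shipped default.
      int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
      long blockingLimit = flushSize * multiplier; // 128 KB * 4 = 524288 bytes = 512.0 K
      System.out.println("Mutations block above " + blockingLimit + " bytes per region");
    }
  }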
2024-11-20T17:22:05,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:05,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:05,819 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/000e494816d64c35b8ab85b711051b74 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/000e494816d64c35b8ab85b711051b74 2024-11-20T17:22:05,822 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/d9dde800cae241d1bf417946e32a371d as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/d9dde800cae241d1bf417946e32a371d 2024-11-20T17:22:05,825 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3030edb9ba565d9149afc5328873a8ef/A of 3030edb9ba565d9149afc5328873a8ef into 000e494816d64c35b8ab85b711051b74(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:22:05,825 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:05,825 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef., storeName=3030edb9ba565d9149afc5328873a8ef/A, priority=13, startTime=1732123325356; duration=0sec 2024-11-20T17:22:05,825 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:05,825 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3030edb9ba565d9149afc5328873a8ef:A 2024-11-20T17:22:05,827 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3030edb9ba565d9149afc5328873a8ef/C of 3030edb9ba565d9149afc5328873a8ef into d9dde800cae241d1bf417946e32a371d(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:22:05,827 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:05,827 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef., storeName=3030edb9ba565d9149afc5328873a8ef/C, priority=13, startTime=1732123325356; duration=0sec 2024-11-20T17:22:05,827 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:05,827 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3030edb9ba565d9149afc5328873a8ef:C 2024-11-20T17:22:05,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T17:22:05,834 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:05,834 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-20T17:22:05,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:05,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. as already flushing 2024-11-20T17:22:05,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:05,835 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
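The repeated pid=74 failures ("Unable to complete flush", preceded each time by "NOT flushing ... as already flushing") are the expected interaction between the master-driven FlushRegionProcedure and the flush the MemStoreFlusher already has in progress: the callable refuses to start a second flush, reports the error, and the master re-dispatches the sub-procedure until it can run (it finally executes in the 17:22:06,141 record further down). The table-level flush behind it (pid=73) was requested by the client at 17:22:05,524; such a request can be issued through the Admin API, sketched here for reference with assumed connection boilerplate rather than the test's own code.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class RequestFlush {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      try (Connection conn = ConnectionFactory.createConnection(conf);
           Admin admin = conn.getAdmin()) {
        // Starts a FlushTableProcedure on the master (pid=73 in the log above),
        // which fans out FlushRegionProcedure sub-procedures (pid=74) to the
        // region servers hosting TestAcidGuarantees regions.
        admin.flush(TableName.valueOf("TestAcidGuarantees"));
      }
    }
  }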
2024-11-20T17:22:05,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:05,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:05,843 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:05,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123385842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:05,844 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:05,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123385843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:05,845 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:05,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123385845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:05,846 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:05,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123385845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:05,846 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:05,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123385845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:05,970 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/0b3b519a7e0d467d810a7e6f2447be13 2024-11-20T17:22:05,975 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/61cfe4e8b51d424385b47f643d405430 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/61cfe4e8b51d424385b47f643d405430 2024-11-20T17:22:05,979 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/61cfe4e8b51d424385b47f643d405430, entries=150, sequenceid=77, filesize=11.7 K 2024-11-20T17:22:05,980 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/c95e944d9edc413f9ca5f40b7dd8d7c9 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/c95e944d9edc413f9ca5f40b7dd8d7c9 2024-11-20T17:22:05,983 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/c95e944d9edc413f9ca5f40b7dd8d7c9, entries=150, sequenceid=77, filesize=11.7 K 2024-11-20T17:22:05,984 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/0b3b519a7e0d467d810a7e6f2447be13 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/0b3b519a7e0d467d810a7e6f2447be13 2024-11-20T17:22:05,987 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 
2024-11-20T17:22:05,987 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-20T17:22:05,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:05,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. as already flushing 2024-11-20T17:22:05,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:05,988 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:05,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:22:05,988 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/0b3b519a7e0d467d810a7e6f2447be13, entries=150, sequenceid=77, filesize=11.7 K 2024-11-20T17:22:05,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
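As a quick consistency check on the flush that finishes in the next record: the three per-family flushes above (stores A, B and C) each logged 49.20 KB, and the summary reports dataSize ~147.60 KB/151140, i.e.

  151,140 bytes / 3 families = 50,380 bytes per family ≈ 49.20 KB
  3 × 49.20 KB = 147.60 KB

so the region-level figure is simply the sum of the three store flushes.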
2024-11-20T17:22:05,989 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 3030edb9ba565d9149afc5328873a8ef in 460ms, sequenceid=77, compaction requested=false 2024-11-20T17:22:05,989 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:06,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T17:22:06,140 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:06,140 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-20T17:22:06,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:06,141 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2837): Flushing 3030edb9ba565d9149afc5328873a8ef 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T17:22:06,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=A 2024-11-20T17:22:06,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:06,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=B 2024-11-20T17:22:06,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:06,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=C 2024-11-20T17:22:06,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:06,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/5e0d48db29bb4926bd947974e124dd7c is 50, key is test_row_0/A:col10/1732123325536/Put/seqid=0 2024-11-20T17:22:06,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742082_1258 (size=12001) 2024-11-20T17:22:06,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 3030edb9ba565d9149afc5328873a8ef 2024-11-20T17:22:06,152 DEBUG 
[MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. as already flushing 2024-11-20T17:22:06,168 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:06,168 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:06,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123386166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:06,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123386166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:06,169 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:06,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123386167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:06,171 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:06,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123386168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:06,171 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:06,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123386169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:06,271 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:06,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123386270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:06,271 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:06,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123386270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:06,272 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:06,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123386270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:06,274 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:06,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123386272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:06,274 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:06,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123386272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:06,473 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:06,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123386472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:06,474 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:06,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123386473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:06,476 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:06,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123386474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:06,476 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:06,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123386475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:06,478 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:06,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123386476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:06,551 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/5e0d48db29bb4926bd947974e124dd7c 2024-11-20T17:22:06,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/078dfd3501554c488c3d6b87bccb57c2 is 50, key is test_row_0/B:col10/1732123325536/Put/seqid=0 2024-11-20T17:22:06,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742083_1259 (size=12001) 2024-11-20T17:22:06,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T17:22:06,769 DEBUG [master/d514dc944523:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-20T17:22:06,774 DEBUG [master/d514dc944523:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 5ae1ceb1863550e6bded974b57fd057c changed from -1.0 to 0.0, refreshing cache 2024-11-20T17:22:06,776 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:06,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123386775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:06,777 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:06,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123386777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:06,778 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:06,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123386778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:06,779 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:06,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123386778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:06,780 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:06,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123386780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:06,964 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/078dfd3501554c488c3d6b87bccb57c2 2024-11-20T17:22:06,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/09b8596fa6df41efb980ef3825410d51 is 50, key is test_row_0/C:col10/1732123325536/Put/seqid=0 2024-11-20T17:22:06,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742084_1260 (size=12001) 2024-11-20T17:22:07,280 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:07,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123387279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:07,281 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:07,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123387279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:07,281 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:07,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123387280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:07,281 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:07,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123387281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:07,283 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T17:22:07,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123387282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111
2024-11-20T17:22:07,377 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/09b8596fa6df41efb980ef3825410d51
2024-11-20T17:22:07,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/5e0d48db29bb4926bd947974e124dd7c as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/5e0d48db29bb4926bd947974e124dd7c
2024-11-20T17:22:07,387 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/5e0d48db29bb4926bd947974e124dd7c, entries=150, sequenceid=91, filesize=11.7 K
2024-11-20T17:22:07,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/078dfd3501554c488c3d6b87bccb57c2 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/078dfd3501554c488c3d6b87bccb57c2
2024-11-20T17:22:07,392 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/078dfd3501554c488c3d6b87bccb57c2, entries=150, sequenceid=91, filesize=11.7 K
2024-11-20T17:22:07,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/09b8596fa6df41efb980ef3825410d51 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/09b8596fa6df41efb980ef3825410d51
2024-11-20T17:22:07,397 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/09b8596fa6df41efb980ef3825410d51, entries=150, sequenceid=91, filesize=11.7 K
2024-11-20T17:22:07,398 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 3030edb9ba565d9149afc5328873a8ef in 1258ms, sequenceid=91, compaction requested=true
2024-11-20T17:22:07,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2538): Flush status journal for 3030edb9ba565d9149afc5328873a8ef:
2024-11-20T17:22:07,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.
2024-11-20T17:22:07,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=74 2024-11-20T17:22:07,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=74 2024-11-20T17:22:07,401 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-11-20T17:22:07,401 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8720 sec 2024-11-20T17:22:07,402 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees in 1.8770 sec 2024-11-20T17:22:07,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T17:22:07,630 INFO [Thread-1128 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-11-20T17:22:07,631 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:22:07,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-11-20T17:22:07,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T17:22:07,633 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:22:07,633 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:22:07,633 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:22:07,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T17:22:07,785 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:07,785 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-20T17:22:07,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 
2024-11-20T17:22:07,786 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing 3030edb9ba565d9149afc5328873a8ef 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T17:22:07,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=A 2024-11-20T17:22:07,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:07,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=B 2024-11-20T17:22:07,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:07,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=C 2024-11-20T17:22:07,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:07,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/b72494d93b794f41bd729617bdb61d13 is 50, key is test_row_0/A:col10/1732123326167/Put/seqid=0 2024-11-20T17:22:07,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742085_1261 (size=12001) 2024-11-20T17:22:07,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T17:22:08,195 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/b72494d93b794f41bd729617bdb61d13 2024-11-20T17:22:08,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/6a962a25dfe34afe9677135434763f3b is 50, key is test_row_0/B:col10/1732123326167/Put/seqid=0 2024-11-20T17:22:08,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742086_1262 (size=12001) 2024-11-20T17:22:08,208 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=116 (bloomFilter=true), 
to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/6a962a25dfe34afe9677135434763f3b 2024-11-20T17:22:08,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/cd53a8c2ca5041f29ed0394ea6e34a38 is 50, key is test_row_0/C:col10/1732123326167/Put/seqid=0 2024-11-20T17:22:08,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742087_1263 (size=12001) 2024-11-20T17:22:08,222 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/cd53a8c2ca5041f29ed0394ea6e34a38 2024-11-20T17:22:08,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/b72494d93b794f41bd729617bdb61d13 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/b72494d93b794f41bd729617bdb61d13 2024-11-20T17:22:08,231 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/b72494d93b794f41bd729617bdb61d13, entries=150, sequenceid=116, filesize=11.7 K 2024-11-20T17:22:08,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/6a962a25dfe34afe9677135434763f3b as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/6a962a25dfe34afe9677135434763f3b 2024-11-20T17:22:08,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T17:22:08,238 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/6a962a25dfe34afe9677135434763f3b, entries=150, sequenceid=116, filesize=11.7 K 2024-11-20T17:22:08,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/cd53a8c2ca5041f29ed0394ea6e34a38 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/cd53a8c2ca5041f29ed0394ea6e34a38 2024-11-20T17:22:08,244 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/cd53a8c2ca5041f29ed0394ea6e34a38, entries=150, sequenceid=116, filesize=11.7 K 2024-11-20T17:22:08,244 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=0 B/0 for 3030edb9ba565d9149afc5328873a8ef in 459ms, sequenceid=116, compaction requested=true 2024-11-20T17:22:08,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:08,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:08,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-11-20T17:22:08,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-11-20T17:22:08,247 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-11-20T17:22:08,247 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 613 msec 2024-11-20T17:22:08,249 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 617 msec 2024-11-20T17:22:08,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 3030edb9ba565d9149afc5328873a8ef 2024-11-20T17:22:08,294 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3030edb9ba565d9149afc5328873a8ef 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T17:22:08,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=A 2024-11-20T17:22:08,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:08,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=B 2024-11-20T17:22:08,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:08,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, 
store=C 2024-11-20T17:22:08,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:08,301 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/521dbe6723004b4ebccdac27bef5fce6 is 50, key is test_row_0/A:col10/1732123328294/Put/seqid=0 2024-11-20T17:22:08,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742088_1264 (size=14441) 2024-11-20T17:22:08,307 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/521dbe6723004b4ebccdac27bef5fce6 2024-11-20T17:22:08,315 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/f45cdde1f4e14c5b8026426f63aa92aa is 50, key is test_row_0/B:col10/1732123328294/Put/seqid=0 2024-11-20T17:22:08,318 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:08,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123388313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:08,318 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:08,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123388314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:08,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742089_1265 (size=12101) 2024-11-20T17:22:08,319 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:08,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123388315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:08,319 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/f45cdde1f4e14c5b8026426f63aa92aa 2024-11-20T17:22:08,322 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:08,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123388318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:08,323 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:08,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123388318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:08,326 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/a22f6239b9f44684ab33c7af36a9af19 is 50, key is test_row_0/C:col10/1732123328294/Put/seqid=0 2024-11-20T17:22:08,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742090_1266 (size=12101) 2024-11-20T17:22:08,330 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/a22f6239b9f44684ab33c7af36a9af19 2024-11-20T17:22:08,336 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/521dbe6723004b4ebccdac27bef5fce6 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/521dbe6723004b4ebccdac27bef5fce6 2024-11-20T17:22:08,340 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/521dbe6723004b4ebccdac27bef5fce6, entries=200, sequenceid=130, filesize=14.1 K 2024-11-20T17:22:08,341 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/f45cdde1f4e14c5b8026426f63aa92aa as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/f45cdde1f4e14c5b8026426f63aa92aa 2024-11-20T17:22:08,347 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/f45cdde1f4e14c5b8026426f63aa92aa, entries=150, sequenceid=130, filesize=11.8 K 2024-11-20T17:22:08,347 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/a22f6239b9f44684ab33c7af36a9af19 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/a22f6239b9f44684ab33c7af36a9af19 2024-11-20T17:22:08,351 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/a22f6239b9f44684ab33c7af36a9af19, entries=150, sequenceid=130, filesize=11.8 K 2024-11-20T17:22:08,352 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 3030edb9ba565d9149afc5328873a8ef in 58ms, sequenceid=130, compaction requested=true 2024-11-20T17:22:08,352 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:08,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3030edb9ba565d9149afc5328873a8ef:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:22:08,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:08,353 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-20T17:22:08,353 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-20T17:22:08,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3030edb9ba565d9149afc5328873a8ef:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:22:08,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:08,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3030edb9ba565d9149afc5328873a8ef:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:22:08,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:22:08,356 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 60208 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-20T17:22:08,356 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 62548 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-20T17:22:08,356 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 3030edb9ba565d9149afc5328873a8ef/B is initiating minor compaction (all files) 2024-11-20T17:22:08,356 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): 3030edb9ba565d9149afc5328873a8ef/A is initiating minor compaction (all files) 2024-11-20T17:22:08,356 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3030edb9ba565d9149afc5328873a8ef/B in TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:08,356 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3030edb9ba565d9149afc5328873a8ef/A in TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:08,356 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/000e494816d64c35b8ab85b711051b74, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/61cfe4e8b51d424385b47f643d405430, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/5e0d48db29bb4926bd947974e124dd7c, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/b72494d93b794f41bd729617bdb61d13, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/521dbe6723004b4ebccdac27bef5fce6] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp, totalSize=61.1 K 2024-11-20T17:22:08,356 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/1a59e34846874f68a876e51266d1ae98, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/c95e944d9edc413f9ca5f40b7dd8d7c9, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/078dfd3501554c488c3d6b87bccb57c2, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/6a962a25dfe34afe9677135434763f3b, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/f45cdde1f4e14c5b8026426f63aa92aa] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp, totalSize=58.8 K 2024-11-20T17:22:08,357 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 000e494816d64c35b8ab85b711051b74, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732123323776 2024-11-20T17:22:08,357 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 1a59e34846874f68a876e51266d1ae98, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732123323776 2024-11-20T17:22:08,358 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting c95e944d9edc413f9ca5f40b7dd8d7c9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732123324914 2024-11-20T17:22:08,358 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 61cfe4e8b51d424385b47f643d405430, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732123324914 2024-11-20T17:22:08,358 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 078dfd3501554c488c3d6b87bccb57c2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732123325535 2024-11-20T17:22:08,358 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5e0d48db29bb4926bd947974e124dd7c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732123325535 2024-11-20T17:22:08,359 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 6a962a25dfe34afe9677135434763f3b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732123326165 2024-11-20T17:22:08,359 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting b72494d93b794f41bd729617bdb61d13, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732123326165 2024-11-20T17:22:08,359 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 521dbe6723004b4ebccdac27bef5fce6, keycount=200, bloomtype=ROW, size=14.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732123328293 2024-11-20T17:22:08,360 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting f45cdde1f4e14c5b8026426f63aa92aa, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732123328294 2024-11-20T17:22:08,373 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3030edb9ba565d9149afc5328873a8ef#B#compaction#223 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:22:08,374 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/dbc6718c166442a4ba9ea6bf49a8cfd5 is 50, key is test_row_0/B:col10/1732123328294/Put/seqid=0 2024-11-20T17:22:08,384 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3030edb9ba565d9149afc5328873a8ef#A#compaction#224 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:22:08,385 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/68918d6413ed4175b2f7732441d0b095 is 50, key is test_row_0/A:col10/1732123328294/Put/seqid=0 2024-11-20T17:22:08,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742091_1267 (size=12375) 2024-11-20T17:22:08,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742092_1268 (size=12375) 2024-11-20T17:22:08,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 3030edb9ba565d9149afc5328873a8ef 2024-11-20T17:22:08,422 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3030edb9ba565d9149afc5328873a8ef 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T17:22:08,423 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=A 2024-11-20T17:22:08,423 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:08,423 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=B 2024-11-20T17:22:08,423 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:08,423 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=C 2024-11-20T17:22:08,424 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:08,430 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/72bbcb4b7a514722aa9797bb479111de is 50, key is test_row_0/A:col10/1732123328317/Put/seqid=0 2024-11-20T17:22:08,437 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:08,437 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:08,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123388433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:08,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123388432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:08,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742093_1269 (size=12151) 2024-11-20T17:22:08,440 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:08,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123388436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:08,440 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:08,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123388437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:08,440 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:08,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123388437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:08,541 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:08,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123388538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:08,541 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:08,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123388539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:08,543 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:08,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123388541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:08,544 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:08,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123388541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:08,545 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:08,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123388542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:08,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T17:22:08,736 INFO [Thread-1128 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-11-20T17:22:08,737 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:22:08,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-11-20T17:22:08,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T17:22:08,739 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:22:08,740 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:22:08,740 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:22:08,743 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:08,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123388742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:08,744 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:08,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123388743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:08,746 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:08,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123388745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:08,747 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:08,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123388745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:08,748 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:08,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123388747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:08,797 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/dbc6718c166442a4ba9ea6bf49a8cfd5 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/dbc6718c166442a4ba9ea6bf49a8cfd5 2024-11-20T17:22:08,803 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 3030edb9ba565d9149afc5328873a8ef/B of 3030edb9ba565d9149afc5328873a8ef into dbc6718c166442a4ba9ea6bf49a8cfd5(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:22:08,803 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:08,803 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef., storeName=3030edb9ba565d9149afc5328873a8ef/B, priority=11, startTime=1732123328353; duration=0sec 2024-11-20T17:22:08,803 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:22:08,803 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3030edb9ba565d9149afc5328873a8ef:B 2024-11-20T17:22:08,803 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-20T17:22:08,806 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 60208 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-20T17:22:08,806 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 3030edb9ba565d9149afc5328873a8ef/C is initiating minor compaction (all files) 2024-11-20T17:22:08,806 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3030edb9ba565d9149afc5328873a8ef/C in TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:08,806 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/d9dde800cae241d1bf417946e32a371d, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/0b3b519a7e0d467d810a7e6f2447be13, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/09b8596fa6df41efb980ef3825410d51, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/cd53a8c2ca5041f29ed0394ea6e34a38, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/a22f6239b9f44684ab33c7af36a9af19] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp, totalSize=58.8 K 2024-11-20T17:22:08,807 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting d9dde800cae241d1bf417946e32a371d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732123323776 2024-11-20T17:22:08,807 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/68918d6413ed4175b2f7732441d0b095 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/68918d6413ed4175b2f7732441d0b095 2024-11-20T17:22:08,807 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b3b519a7e0d467d810a7e6f2447be13, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732123324914 2024-11-20T17:22:08,808 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 09b8596fa6df41efb980ef3825410d51, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732123325535 2024-11-20T17:22:08,808 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting cd53a8c2ca5041f29ed0394ea6e34a38, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732123326165 2024-11-20T17:22:08,809 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting a22f6239b9f44684ab33c7af36a9af19, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732123328294 2024-11-20T17:22:08,812 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 3030edb9ba565d9149afc5328873a8ef/A of 3030edb9ba565d9149afc5328873a8ef into 68918d6413ed4175b2f7732441d0b095(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:22:08,812 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:08,812 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef., storeName=3030edb9ba565d9149afc5328873a8ef/A, priority=11, startTime=1732123328353; duration=0sec 2024-11-20T17:22:08,813 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:08,813 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3030edb9ba565d9149afc5328873a8ef:A 2024-11-20T17:22:08,823 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3030edb9ba565d9149afc5328873a8ef#C#compaction#226 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:22:08,824 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/f84d4033a21e4567bdebf6bdf126c9b4 is 50, key is test_row_0/C:col10/1732123328294/Put/seqid=0 2024-11-20T17:22:08,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742094_1270 (size=12375) 2024-11-20T17:22:08,838 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/72bbcb4b7a514722aa9797bb479111de 2024-11-20T17:22:08,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T17:22:08,847 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/6c098ea54501433f972c6a4c8ed0d398 is 50, key is test_row_0/B:col10/1732123328317/Put/seqid=0 2024-11-20T17:22:08,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742095_1271 (size=12151) 2024-11-20T17:22:08,892 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:08,892 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T17:22:08,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:08,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. as already flushing 2024-11-20T17:22:08,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:08,893 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:08,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:08,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:09,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T17:22:09,044 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:09,044 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T17:22:09,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:09,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. as already flushing 2024-11-20T17:22:09,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:09,045 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:09,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:09,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:09,048 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:09,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123389047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:09,048 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:09,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123389047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:09,049 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:09,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123389048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:09,050 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:09,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123389048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:09,051 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:09,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123389049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:09,197 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:09,197 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T17:22:09,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:09,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. as already flushing 2024-11-20T17:22:09,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:09,197 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:09,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:09,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:09,234 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/f84d4033a21e4567bdebf6bdf126c9b4 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/f84d4033a21e4567bdebf6bdf126c9b4 2024-11-20T17:22:09,239 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 3030edb9ba565d9149afc5328873a8ef/C of 3030edb9ba565d9149afc5328873a8ef into f84d4033a21e4567bdebf6bdf126c9b4(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:22:09,239 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:09,239 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef., storeName=3030edb9ba565d9149afc5328873a8ef/C, priority=11, startTime=1732123328353; duration=0sec 2024-11-20T17:22:09,239 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:09,239 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3030edb9ba565d9149afc5328873a8ef:C 2024-11-20T17:22:09,252 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/6c098ea54501433f972c6a4c8ed0d398 2024-11-20T17:22:09,260 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/68e6534acae54d3bb22b0d688bb94c55 is 50, key is test_row_0/C:col10/1732123328317/Put/seqid=0 2024-11-20T17:22:09,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742096_1272 (size=12151) 2024-11-20T17:22:09,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T17:22:09,350 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:09,350 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T17:22:09,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:09,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. as already flushing 2024-11-20T17:22:09,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:09,350 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:09,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:09,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:09,502 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:09,503 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T17:22:09,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:09,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. as already flushing 2024-11-20T17:22:09,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:09,503 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:09,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:09,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:09,552 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:09,552 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:09,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123389550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:09,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123389551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:09,555 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:09,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123389552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:09,555 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:09,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123389554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:09,557 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:09,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123389556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:09,656 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:09,656 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T17:22:09,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:09,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. as already flushing 2024-11-20T17:22:09,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:09,656 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:09,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:09,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:09,666 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/68e6534acae54d3bb22b0d688bb94c55 2024-11-20T17:22:09,670 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/72bbcb4b7a514722aa9797bb479111de as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/72bbcb4b7a514722aa9797bb479111de 2024-11-20T17:22:09,674 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/72bbcb4b7a514722aa9797bb479111de, entries=150, sequenceid=155, filesize=11.9 K 2024-11-20T17:22:09,675 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/6c098ea54501433f972c6a4c8ed0d398 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/6c098ea54501433f972c6a4c8ed0d398 2024-11-20T17:22:09,679 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/6c098ea54501433f972c6a4c8ed0d398, entries=150, sequenceid=155, filesize=11.9 K 2024-11-20T17:22:09,680 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/68e6534acae54d3bb22b0d688bb94c55 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/68e6534acae54d3bb22b0d688bb94c55 2024-11-20T17:22:09,684 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/68e6534acae54d3bb22b0d688bb94c55, entries=150, sequenceid=155, filesize=11.9 K 2024-11-20T17:22:09,685 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 3030edb9ba565d9149afc5328873a8ef in 1263ms, sequenceid=155, compaction requested=false 2024-11-20T17:22:09,685 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:09,810 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:09,810 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T17:22:09,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:09,810 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing 3030edb9ba565d9149afc5328873a8ef 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T17:22:09,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=A 2024-11-20T17:22:09,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:09,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=B 2024-11-20T17:22:09,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:09,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=C 2024-11-20T17:22:09,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:09,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/fe1462a7bd7846d78841d61a28e77e67 is 50, key is test_row_0/A:col10/1732123328436/Put/seqid=0 2024-11-20T17:22:09,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742097_1273 (size=12151) 2024-11-20T17:22:09,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T17:22:10,220 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=170 (bloomFilter=true), 
to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/fe1462a7bd7846d78841d61a28e77e67 2024-11-20T17:22:10,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/9178e01eb8de4a339a6459b3a30a5267 is 50, key is test_row_0/B:col10/1732123328436/Put/seqid=0 2024-11-20T17:22:10,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742098_1274 (size=12151) 2024-11-20T17:22:10,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 3030edb9ba565d9149afc5328873a8ef 2024-11-20T17:22:10,555 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. as already flushing 2024-11-20T17:22:10,574 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:10,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123390571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:10,574 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:10,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123390572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:10,575 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:10,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123390572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:10,576 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:10,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123390574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:10,577 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:10,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123390575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:10,632 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/9178e01eb8de4a339a6459b3a30a5267 2024-11-20T17:22:10,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/0ea4979d826044129672016494100156 is 50, key is test_row_0/C:col10/1732123328436/Put/seqid=0 2024-11-20T17:22:10,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742099_1275 (size=12151) 2024-11-20T17:22:10,676 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:10,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123390676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:10,677 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:10,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123390676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:10,677 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:10,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123390676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:10,679 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:10,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123390677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:10,679 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:10,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123390677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:10,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T17:22:10,879 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:10,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123390877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:10,880 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:10,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123390879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:10,880 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:10,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123390879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:10,882 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:10,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123390880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:10,882 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:10,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123390880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:11,045 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/0ea4979d826044129672016494100156 2024-11-20T17:22:11,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/fe1462a7bd7846d78841d61a28e77e67 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/fe1462a7bd7846d78841d61a28e77e67 2024-11-20T17:22:11,053 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/fe1462a7bd7846d78841d61a28e77e67, entries=150, sequenceid=170, filesize=11.9 K 2024-11-20T17:22:11,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/9178e01eb8de4a339a6459b3a30a5267 as 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/9178e01eb8de4a339a6459b3a30a5267 2024-11-20T17:22:11,058 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/9178e01eb8de4a339a6459b3a30a5267, entries=150, sequenceid=170, filesize=11.9 K 2024-11-20T17:22:11,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/0ea4979d826044129672016494100156 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/0ea4979d826044129672016494100156 2024-11-20T17:22:11,063 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/0ea4979d826044129672016494100156, entries=150, sequenceid=170, filesize=11.9 K 2024-11-20T17:22:11,064 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 3030edb9ba565d9149afc5328873a8ef in 1254ms, sequenceid=170, compaction requested=true 2024-11-20T17:22:11,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:11,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 
2024-11-20T17:22:11,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-11-20T17:22:11,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-11-20T17:22:11,067 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-11-20T17:22:11,067 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3250 sec 2024-11-20T17:22:11,068 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 2.3300 sec 2024-11-20T17:22:11,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 3030edb9ba565d9149afc5328873a8ef 2024-11-20T17:22:11,184 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3030edb9ba565d9149afc5328873a8ef 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T17:22:11,186 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=A 2024-11-20T17:22:11,186 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:11,186 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=B 2024-11-20T17:22:11,186 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:11,186 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=C 2024-11-20T17:22:11,186 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:11,192 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:11,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123391188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:11,193 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:11,193 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:11,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123391189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:11,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123391189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:11,193 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:11,193 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:11,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123391190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:11,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123391190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:11,207 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/bf9c590c2c1242f99599db0c0e298f18 is 50, key is test_row_0/A:col10/1732123331184/Put/seqid=0 2024-11-20T17:22:11,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742100_1276 (size=16931) 2024-11-20T17:22:11,294 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:11,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123391293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:11,296 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:11,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123391295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:11,296 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:11,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123391295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:11,297 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:11,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123391295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:11,297 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:11,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123391295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:11,498 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:11,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123391497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:11,499 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:11,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123391498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:11,500 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:11,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123391498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:11,500 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:11,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123391499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:11,501 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:11,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123391499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:11,618 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/bf9c590c2c1242f99599db0c0e298f18 2024-11-20T17:22:11,626 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/a378cf463b814ac382197a9744634d7f is 50, key is test_row_0/B:col10/1732123331184/Put/seqid=0 2024-11-20T17:22:11,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742101_1277 (size=12151) 2024-11-20T17:22:11,631 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/a378cf463b814ac382197a9744634d7f 2024-11-20T17:22:11,638 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/d306e6065bf847d79494c69b6146ac69 is 50, key is test_row_0/C:col10/1732123331184/Put/seqid=0 2024-11-20T17:22:11,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742102_1278 (size=12151) 2024-11-20T17:22:11,800 WARN 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:11,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123391799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:11,802 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:11,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123391802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:11,803 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:11,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123391802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:11,803 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:11,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123391802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:11,803 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:11,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123391802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:11,873 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-20T17:22:11,873 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-20T17:22:12,042 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/d306e6065bf847d79494c69b6146ac69 2024-11-20T17:22:12,047 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/bf9c590c2c1242f99599db0c0e298f18 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/bf9c590c2c1242f99599db0c0e298f18 2024-11-20T17:22:12,051 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/bf9c590c2c1242f99599db0c0e298f18, entries=250, sequenceid=198, filesize=16.5 K 2024-11-20T17:22:12,052 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/a378cf463b814ac382197a9744634d7f as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/a378cf463b814ac382197a9744634d7f 2024-11-20T17:22:12,055 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/a378cf463b814ac382197a9744634d7f, entries=150, sequenceid=198, filesize=11.9 K 2024-11-20T17:22:12,056 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/d306e6065bf847d79494c69b6146ac69 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/d306e6065bf847d79494c69b6146ac69 2024-11-20T17:22:12,060 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/d306e6065bf847d79494c69b6146ac69, entries=150, sequenceid=198, filesize=11.9 K 2024-11-20T17:22:12,061 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 3030edb9ba565d9149afc5328873a8ef in 877ms, sequenceid=198, compaction requested=true 2024-11-20T17:22:12,061 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:12,061 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3030edb9ba565d9149afc5328873a8ef:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:22:12,061 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:12,061 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3030edb9ba565d9149afc5328873a8ef:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:22:12,061 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:12,061 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3030edb9ba565d9149afc5328873a8ef:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:22:12,061 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:22:12,061 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:22:12,061 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:22:12,062 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 53608 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:22:12,062 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48828 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:22:12,062 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): 3030edb9ba565d9149afc5328873a8ef/A is initiating minor compaction (all files) 2024-11-20T17:22:12,062 DEBUG 
[RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 3030edb9ba565d9149afc5328873a8ef/B is initiating minor compaction (all files) 2024-11-20T17:22:12,063 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3030edb9ba565d9149afc5328873a8ef/A in TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:12,063 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3030edb9ba565d9149afc5328873a8ef/B in TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:12,063 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/dbc6718c166442a4ba9ea6bf49a8cfd5, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/6c098ea54501433f972c6a4c8ed0d398, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/9178e01eb8de4a339a6459b3a30a5267, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/a378cf463b814ac382197a9744634d7f] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp, totalSize=47.7 K 2024-11-20T17:22:12,063 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/68918d6413ed4175b2f7732441d0b095, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/72bbcb4b7a514722aa9797bb479111de, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/fe1462a7bd7846d78841d61a28e77e67, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/bf9c590c2c1242f99599db0c0e298f18] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp, totalSize=52.4 K 2024-11-20T17:22:12,063 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting dbc6718c166442a4ba9ea6bf49a8cfd5, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732123328294 2024-11-20T17:22:12,063 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 68918d6413ed4175b2f7732441d0b095, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732123328294 2024-11-20T17:22:12,063 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 6c098ea54501433f972c6a4c8ed0d398, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732123328317 2024-11-20T17:22:12,063 DEBUG 
[RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 72bbcb4b7a514722aa9797bb479111de, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732123328317 2024-11-20T17:22:12,064 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 9178e01eb8de4a339a6459b3a30a5267, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732123328426 2024-11-20T17:22:12,064 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting fe1462a7bd7846d78841d61a28e77e67, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732123328426 2024-11-20T17:22:12,064 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting a378cf463b814ac382197a9744634d7f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732123330573 2024-11-20T17:22:12,064 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting bf9c590c2c1242f99599db0c0e298f18, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732123330570 2024-11-20T17:22:12,074 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3030edb9ba565d9149afc5328873a8ef#A#compaction#235 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:22:12,075 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/4d65492016a046a290bfda8e938ec0fd is 50, key is test_row_0/A:col10/1732123331184/Put/seqid=0 2024-11-20T17:22:12,075 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3030edb9ba565d9149afc5328873a8ef#B#compaction#236 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:22:12,076 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/0e470b5356cc4cbcbf6a86f7ea6da3b8 is 50, key is test_row_0/B:col10/1732123331184/Put/seqid=0 2024-11-20T17:22:12,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742103_1279 (size=12561) 2024-11-20T17:22:12,087 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/4d65492016a046a290bfda8e938ec0fd as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/4d65492016a046a290bfda8e938ec0fd 2024-11-20T17:22:12,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742104_1280 (size=12561) 2024-11-20T17:22:12,093 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3030edb9ba565d9149afc5328873a8ef/A of 3030edb9ba565d9149afc5328873a8ef into 4d65492016a046a290bfda8e938ec0fd(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:22:12,093 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:12,093 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef., storeName=3030edb9ba565d9149afc5328873a8ef/A, priority=12, startTime=1732123332061; duration=0sec 2024-11-20T17:22:12,094 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:22:12,094 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3030edb9ba565d9149afc5328873a8ef:A 2024-11-20T17:22:12,094 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:22:12,095 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48828 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:22:12,095 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): 3030edb9ba565d9149afc5328873a8ef/C is initiating minor compaction (all files) 2024-11-20T17:22:12,095 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3030edb9ba565d9149afc5328873a8ef/C in TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 
2024-11-20T17:22:12,095 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/f84d4033a21e4567bdebf6bdf126c9b4, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/68e6534acae54d3bb22b0d688bb94c55, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/0ea4979d826044129672016494100156, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/d306e6065bf847d79494c69b6146ac69] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp, totalSize=47.7 K 2024-11-20T17:22:12,095 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/0e470b5356cc4cbcbf6a86f7ea6da3b8 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/0e470b5356cc4cbcbf6a86f7ea6da3b8 2024-11-20T17:22:12,096 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting f84d4033a21e4567bdebf6bdf126c9b4, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732123328294 2024-11-20T17:22:12,096 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 68e6534acae54d3bb22b0d688bb94c55, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732123328317 2024-11-20T17:22:12,096 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0ea4979d826044129672016494100156, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732123328426 2024-11-20T17:22:12,097 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting d306e6065bf847d79494c69b6146ac69, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732123330573 2024-11-20T17:22:12,102 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3030edb9ba565d9149afc5328873a8ef/B of 3030edb9ba565d9149afc5328873a8ef into 0e470b5356cc4cbcbf6a86f7ea6da3b8(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
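[Editorial sketch] The "Exploring compaction algorithm has selected 4 files ... with 3 in ratio" entry above refers to the size-ratio test applied to each candidate set before a minor compaction is queued. A simplified illustration of that test (not the HBase source itself), assuming the default-style ratio of 1.2 and file sizes roughly matching the four files selected above:

    // Sketch of the size-ratio criterion: every file in the candidate set must be
    // no larger than `ratio` times the combined size of the other files, so one
    // oversized file is not rewritten again and again.
    public final class RatioCheck {
        static boolean filesInRatio(long[] fileSizes, double ratio) {
            long total = 0;
            for (long size : fileSizes) {
                total += size;
            }
            for (long size : fileSizes) {
                if (size > (total - size) * ratio) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Approximate sizes of the four C-store files selected above (bytes).
            long[] candidate = {12400, 12200, 12200, 12000};
            System.out.println(filesInRatio(candidate, 1.2)); // true -> eligible
        }
    }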
2024-11-20T17:22:12,102 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:12,102 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef., storeName=3030edb9ba565d9149afc5328873a8ef/B, priority=12, startTime=1732123332061; duration=0sec 2024-11-20T17:22:12,104 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:12,104 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3030edb9ba565d9149afc5328873a8ef:B 2024-11-20T17:22:12,108 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3030edb9ba565d9149afc5328873a8ef#C#compaction#237 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:22:12,108 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/bfce8245240a46759e4a1757b339a406 is 50, key is test_row_0/C:col10/1732123331184/Put/seqid=0 2024-11-20T17:22:12,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742105_1281 (size=12561) 2024-11-20T17:22:12,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 3030edb9ba565d9149afc5328873a8ef 2024-11-20T17:22:12,307 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3030edb9ba565d9149afc5328873a8ef 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T17:22:12,308 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=A 2024-11-20T17:22:12,308 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:12,308 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=B 2024-11-20T17:22:12,308 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:12,308 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=C 2024-11-20T17:22:12,308 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:12,313 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/3c208e9c3b80422582b7466f5a48f294 is 50, key is test_row_0/A:col10/1732123332305/Put/seqid=0 2024-11-20T17:22:12,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742106_1282 (size=12151) 
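[Editorial sketch] The flush started above is racing incoming writes; the WARN entries that follow show the region refusing puts with RegionTooBusyException once its memstore crosses the 512 K blocking limit. That limit is the product of the region flush size and the block multiplier, both configured far below production defaults for this test. A minimal sketch of the two settings involved, with illustrative values (the real test harness chooses its own):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimit {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Flush a region's memstore once it reaches this many bytes.
            conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
            // Block writers (RegionTooBusyException) at flushSize * multiplier.
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            long blockAt = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
            System.out.println("writes block at " + blockAt + " bytes"); // 524288 = 512 K
        }
    }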
2024-11-20T17:22:12,332 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:12,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123392329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:12,332 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:12,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123392329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:12,333 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:12,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123392330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:12,333 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:12,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123392331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:12,334 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:12,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123392331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:12,435 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:12,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:12,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123392433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:12,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123392433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:12,435 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:12,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123392434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:12,436 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:12,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123392434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:12,436 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:12,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123392435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:12,532 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/bfce8245240a46759e4a1757b339a406 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/bfce8245240a46759e4a1757b339a406 2024-11-20T17:22:12,537 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3030edb9ba565d9149afc5328873a8ef/C of 3030edb9ba565d9149afc5328873a8ef into bfce8245240a46759e4a1757b339a406(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:22:12,537 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:12,538 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef., storeName=3030edb9ba565d9149afc5328873a8ef/C, priority=12, startTime=1732123332061; duration=0sec 2024-11-20T17:22:12,538 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:12,538 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3030edb9ba565d9149afc5328873a8ef:C 2024-11-20T17:22:12,637 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:12,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123392636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:12,638 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:12,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123392637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:12,638 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:12,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123392637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:12,638 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:12,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123392637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:12,640 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:12,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123392638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:12,724 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/3c208e9c3b80422582b7466f5a48f294 2024-11-20T17:22:12,732 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/01d0828617f14f819ff9719ca4ea2d9b is 50, key is test_row_0/B:col10/1732123332305/Put/seqid=0 2024-11-20T17:22:12,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742107_1283 (size=12151) 2024-11-20T17:22:12,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T17:22:12,844 INFO [Thread-1128 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-11-20T17:22:12,846 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:22:12,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-11-20T17:22:12,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 
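[Editorial sketch] The burst of RegionTooBusyException entries above is server-side backpressure: mutations are rejected until the in-flight flush drains the memstore, and the stock client keeps retrying them until its operation deadline (the "deadline=" values roughly 60 s out). A minimal client-side sketch of issuing one of these puts with an explicit backoff, assuming the row, family, and qualifier visible in the log keys and an illustrative cell value; depending on the client version the exception may surface wrapped in a retries-exhausted exception rather than directly:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPut {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
                long backoffMs = 100;
                for (int attempt = 1; attempt <= 5; attempt++) {
                    try {
                        table.put(put); // the client also retries busy regions internally
                        return;
                    } catch (RegionTooBusyException busy) {
                        // Memstore over its blocking limit; give the flush time to finish.
                        Thread.sleep(backoffMs);
                        backoffMs *= 2;
                    }
                }
                throw new IOException("region still busy after explicit retries");
            }
        }
    }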
2024-11-20T17:22:12,847 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:22:12,848 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:22:12,848 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:22:12,940 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:12,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123392939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:12,941 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:12,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123392940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:12,942 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:12,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123392941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:12,943 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:12,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123392941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:12,943 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:12,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123392941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:12,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T17:22:12,999 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:13,000 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T17:22:13,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:13,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. as already flushing 2024-11-20T17:22:13,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:13,001 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
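[Editorial sketch] Procedures pid=79/80 above trace the path a client flush request takes: the master stores a FlushTableProcedure, spawns a FlushRegionProcedure per region, and keeps re-dispatching it while the region server answers "already flushing". From the client side all of that hides behind a single blocking admin call; a minimal sketch, assuming the default connection configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Blocks until the master-side flush procedure completes for every
                // region, even if a region server must finish an in-progress flush first.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }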
2024-11-20T17:22:13,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:13,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:13,138 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/01d0828617f14f819ff9719ca4ea2d9b 2024-11-20T17:22:13,146 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/e6e877b0f3574ac3bd3defe0ad0632cd is 50, key is test_row_0/C:col10/1732123332305/Put/seqid=0 2024-11-20T17:22:13,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T17:22:13,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742108_1284 (size=12151) 2024-11-20T17:22:13,153 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:13,153 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T17:22:13,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:13,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. as already flushing 2024-11-20T17:22:13,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:13,153 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:13,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:13,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:13,305 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:13,305 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T17:22:13,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:13,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. as already flushing 2024-11-20T17:22:13,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:13,306 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:13,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:13,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:13,444 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:13,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123393444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:13,445 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:13,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123393444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:13,445 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:13,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123393444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:13,447 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:13,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123393446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:13,447 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:13,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123393446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:13,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T17:22:13,457 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:13,457 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T17:22:13,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:13,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. as already flushing 2024-11-20T17:22:13,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:13,458 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:22:13,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:13,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:13,551 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/e6e877b0f3574ac3bd3defe0ad0632cd 2024-11-20T17:22:13,556 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/3c208e9c3b80422582b7466f5a48f294 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/3c208e9c3b80422582b7466f5a48f294 2024-11-20T17:22:13,560 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/3c208e9c3b80422582b7466f5a48f294, entries=150, sequenceid=212, filesize=11.9 K 2024-11-20T17:22:13,561 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/01d0828617f14f819ff9719ca4ea2d9b as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/01d0828617f14f819ff9719ca4ea2d9b 2024-11-20T17:22:13,564 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/01d0828617f14f819ff9719ca4ea2d9b, entries=150, sequenceid=212, filesize=11.9 K 2024-11-20T17:22:13,565 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/e6e877b0f3574ac3bd3defe0ad0632cd as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/e6e877b0f3574ac3bd3defe0ad0632cd 2024-11-20T17:22:13,569 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/e6e877b0f3574ac3bd3defe0ad0632cd, entries=150, sequenceid=212, filesize=11.9 K 2024-11-20T17:22:13,570 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 3030edb9ba565d9149afc5328873a8ef in 1263ms, sequenceid=212, compaction requested=false 2024-11-20T17:22:13,570 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:13,610 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 
d514dc944523,40121,1732123262111 2024-11-20T17:22:13,610 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T17:22:13,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:13,610 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing 3030edb9ba565d9149afc5328873a8ef 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T17:22:13,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=A 2024-11-20T17:22:13,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:13,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=B 2024-11-20T17:22:13,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:13,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=C 2024-11-20T17:22:13,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:13,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/dc0bc211357f48bbafe1eb5438cd363c is 50, key is test_row_0/A:col10/1732123332329/Put/seqid=0 2024-11-20T17:22:13,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742109_1285 (size=12151) 2024-11-20T17:22:13,621 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/dc0bc211357f48bbafe1eb5438cd363c 2024-11-20T17:22:13,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/172518b1eb524160a6108112df3e3a28 is 50, key is test_row_0/B:col10/1732123332329/Put/seqid=0 2024-11-20T17:22:13,649 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742110_1286 (size=12151) 2024-11-20T17:22:13,650 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/172518b1eb524160a6108112df3e3a28 2024-11-20T17:22:13,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/c1b7da8922534af9a96962b5c5550afd is 50, key is test_row_0/C:col10/1732123332329/Put/seqid=0 2024-11-20T17:22:13,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742111_1287 (size=12151) 2024-11-20T17:22:13,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T17:22:14,065 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/c1b7da8922534af9a96962b5c5550afd 2024-11-20T17:22:14,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/dc0bc211357f48bbafe1eb5438cd363c as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/dc0bc211357f48bbafe1eb5438cd363c 2024-11-20T17:22:14,074 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/dc0bc211357f48bbafe1eb5438cd363c, entries=150, sequenceid=238, filesize=11.9 K 2024-11-20T17:22:14,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/172518b1eb524160a6108112df3e3a28 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/172518b1eb524160a6108112df3e3a28 2024-11-20T17:22:14,079 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/172518b1eb524160a6108112df3e3a28, entries=150, sequenceid=238, filesize=11.9 K 2024-11-20T17:22:14,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/c1b7da8922534af9a96962b5c5550afd as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/c1b7da8922534af9a96962b5c5550afd 2024-11-20T17:22:14,083 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/c1b7da8922534af9a96962b5c5550afd, entries=150, sequenceid=238, filesize=11.9 K 2024-11-20T17:22:14,084 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=0 B/0 for 3030edb9ba565d9149afc5328873a8ef in 474ms, sequenceid=238, compaction requested=true 2024-11-20T17:22:14,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:14,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 
2024-11-20T17:22:14,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-11-20T17:22:14,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-11-20T17:22:14,087 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-11-20T17:22:14,087 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2370 sec 2024-11-20T17:22:14,088 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 1.2410 sec 2024-11-20T17:22:14,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 3030edb9ba565d9149afc5328873a8ef 2024-11-20T17:22:14,456 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3030edb9ba565d9149afc5328873a8ef 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T17:22:14,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=A 2024-11-20T17:22:14,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:14,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=B 2024-11-20T17:22:14,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:14,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=C 2024-11-20T17:22:14,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:14,462 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/ef107feb57124a8bb5c5e318923dce73 is 50, key is test_row_0/A:col10/1732123334456/Put/seqid=0 2024-11-20T17:22:14,479 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:14,479 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:14,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123394475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:14,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123394475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:14,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742112_1288 (size=12151) 2024-11-20T17:22:14,482 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:14,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123394478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:14,482 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:14,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123394479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:14,483 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:14,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123394480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:14,582 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:14,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123394581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:14,582 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:14,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123394581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:14,586 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:14,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123394583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:14,586 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:14,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123394583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:14,586 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:14,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123394584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:14,785 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:14,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123394783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:14,785 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:14,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123394783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:14,789 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:14,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123394787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:14,789 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:14,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123394788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:14,790 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:14,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123394788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:14,882 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/ef107feb57124a8bb5c5e318923dce73 2024-11-20T17:22:14,889 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/654e5108747b44d799104b021e945d3c is 50, key is test_row_0/B:col10/1732123334456/Put/seqid=0 2024-11-20T17:22:14,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742113_1289 (size=12151) 2024-11-20T17:22:14,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T17:22:14,951 INFO [Thread-1128 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-11-20T17:22:14,953 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:22:14,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-11-20T17:22:14,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 
2024-11-20T17:22:14,954 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:22:14,955 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:22:14,955 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:22:15,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T17:22:15,088 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:15,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123395088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:15,090 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:15,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123395088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:15,091 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:15,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123395090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:15,092 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:15,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123395090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:15,094 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:15,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123395093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:15,107 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:15,107 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T17:22:15,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:15,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. as already flushing 2024-11-20T17:22:15,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:15,108 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:22:15,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:15,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:15,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T17:22:15,260 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:15,260 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T17:22:15,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:15,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. as already flushing 2024-11-20T17:22:15,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:15,260 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:15,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:15,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:22:15,293 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/654e5108747b44d799104b021e945d3c 2024-11-20T17:22:15,300 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/15ee6bfdb8e2472ebdd9e124482bbc9a is 50, key is test_row_0/C:col10/1732123334456/Put/seqid=0 2024-11-20T17:22:15,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742114_1290 (size=12151) 2024-11-20T17:22:15,309 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/15ee6bfdb8e2472ebdd9e124482bbc9a 2024-11-20T17:22:15,313 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/ef107feb57124a8bb5c5e318923dce73 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/ef107feb57124a8bb5c5e318923dce73 2024-11-20T17:22:15,317 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/ef107feb57124a8bb5c5e318923dce73, entries=150, sequenceid=250, filesize=11.9 K 2024-11-20T17:22:15,318 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/654e5108747b44d799104b021e945d3c as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/654e5108747b44d799104b021e945d3c 2024-11-20T17:22:15,322 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/654e5108747b44d799104b021e945d3c, entries=150, sequenceid=250, filesize=11.9 K 2024-11-20T17:22:15,323 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/15ee6bfdb8e2472ebdd9e124482bbc9a as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/15ee6bfdb8e2472ebdd9e124482bbc9a 2024-11-20T17:22:15,326 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/15ee6bfdb8e2472ebdd9e124482bbc9a, entries=150, sequenceid=250, filesize=11.9 K 2024-11-20T17:22:15,327 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 3030edb9ba565d9149afc5328873a8ef in 871ms, sequenceid=250, compaction requested=true 2024-11-20T17:22:15,327 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:15,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3030edb9ba565d9149afc5328873a8ef:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:22:15,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:15,327 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:22:15,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3030edb9ba565d9149afc5328873a8ef:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:22:15,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:15,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3030edb9ba565d9149afc5328873a8ef:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:22:15,327 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:22:15,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:22:15,328 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:22:15,328 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:22:15,329 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): 3030edb9ba565d9149afc5328873a8ef/A is initiating minor compaction (all files) 2024-11-20T17:22:15,329 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 3030edb9ba565d9149afc5328873a8ef/B is initiating minor compaction (all files) 2024-11-20T17:22:15,329 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3030edb9ba565d9149afc5328873a8ef/A in TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 
2024-11-20T17:22:15,329 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3030edb9ba565d9149afc5328873a8ef/B in TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:15,329 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/4d65492016a046a290bfda8e938ec0fd, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/3c208e9c3b80422582b7466f5a48f294, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/dc0bc211357f48bbafe1eb5438cd363c, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/ef107feb57124a8bb5c5e318923dce73] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp, totalSize=47.9 K 2024-11-20T17:22:15,329 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/0e470b5356cc4cbcbf6a86f7ea6da3b8, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/01d0828617f14f819ff9719ca4ea2d9b, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/172518b1eb524160a6108112df3e3a28, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/654e5108747b44d799104b021e945d3c] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp, totalSize=47.9 K 2024-11-20T17:22:15,329 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4d65492016a046a290bfda8e938ec0fd, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732123330573 2024-11-20T17:22:15,329 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 0e470b5356cc4cbcbf6a86f7ea6da3b8, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732123330573 2024-11-20T17:22:15,329 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3c208e9c3b80422582b7466f5a48f294, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1732123332305 2024-11-20T17:22:15,329 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 01d0828617f14f819ff9719ca4ea2d9b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1732123332305 2024-11-20T17:22:15,330 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting dc0bc211357f48bbafe1eb5438cd363c, keycount=150, 
bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732123332329 2024-11-20T17:22:15,330 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 172518b1eb524160a6108112df3e3a28, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732123332329 2024-11-20T17:22:15,330 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 654e5108747b44d799104b021e945d3c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732123334451 2024-11-20T17:22:15,330 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting ef107feb57124a8bb5c5e318923dce73, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732123334451 2024-11-20T17:22:15,338 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3030edb9ba565d9149afc5328873a8ef#B#compaction#247 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:22:15,339 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/e52c49ca0c664ef6a354a38f92ed2807 is 50, key is test_row_0/B:col10/1732123334456/Put/seqid=0 2024-11-20T17:22:15,340 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3030edb9ba565d9149afc5328873a8ef#A#compaction#248 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:22:15,340 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/8f2de56c102c4634a7978e6e0e0a95ff is 50, key is test_row_0/A:col10/1732123334456/Put/seqid=0 2024-11-20T17:22:15,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742115_1291 (size=12697) 2024-11-20T17:22:15,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742116_1292 (size=12697) 2024-11-20T17:22:15,413 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:15,413 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T17:22:15,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 
2024-11-20T17:22:15,414 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing 3030edb9ba565d9149afc5328873a8ef 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T17:22:15,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=A 2024-11-20T17:22:15,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:15,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=B 2024-11-20T17:22:15,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:15,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=C 2024-11-20T17:22:15,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:15,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/312ce682b32d404d8371778f07ef08bc is 50, key is test_row_0/A:col10/1732123334473/Put/seqid=0 2024-11-20T17:22:15,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742117_1293 (size=12301) 2024-11-20T17:22:15,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T17:22:15,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 3030edb9ba565d9149afc5328873a8ef 2024-11-20T17:22:15,592 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. as already flushing 2024-11-20T17:22:15,601 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:15,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123395599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:15,602 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:15,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123395599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:15,603 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:15,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123395600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:15,603 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:15,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123395601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:15,604 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:15,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123395601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:15,704 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:15,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123395702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:15,705 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:15,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123395703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:15,705 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:15,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123395704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:15,706 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:15,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123395704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:15,707 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:15,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123395705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:15,751 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/e52c49ca0c664ef6a354a38f92ed2807 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/e52c49ca0c664ef6a354a38f92ed2807 2024-11-20T17:22:15,751 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/8f2de56c102c4634a7978e6e0e0a95ff as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/8f2de56c102c4634a7978e6e0e0a95ff 2024-11-20T17:22:15,756 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3030edb9ba565d9149afc5328873a8ef/B of 3030edb9ba565d9149afc5328873a8ef into e52c49ca0c664ef6a354a38f92ed2807(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:22:15,756 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3030edb9ba565d9149afc5328873a8ef/A of 3030edb9ba565d9149afc5328873a8ef into 8f2de56c102c4634a7978e6e0e0a95ff(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
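
The repeated RegionTooBusyException warnings above come from HRegion.checkResources, which rejects writes once a region's memstore passes its blocking threshold; that threshold is the configured flush size times the block multiplier, so the 512.0 K figure implies this run uses a flush size far below the 128 MB default (for example 128 K with the default multiplier of 4). A minimal sketch of that arithmetic, with the concrete flush size taken as an assumption:

// Sketch only: how the "Over memstore limit=512.0 K" threshold is typically derived.
// The flush size used by this test run is an assumption (the stock default is 128 MB);
// only the relationship threshold = flushSize * blockMultiplier is taken as given.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);  // assumed test override: 128 K
    conf.setLong("hbase.hregion.memstore.block.multiplier", 4);     // HBase default
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = flushSize * multiplier;  // 512 K with the assumed values above
    System.out.printf("writes block with RegionTooBusyException above %d bytes (%.1f K)%n",
        blockingLimit, blockingLimit / 1024.0);
  }
}

With the stock 128 MB flush size the same multiplier yields a 512 MB blocking limit, so outside a deliberately constrained test like this the check only fires when flushes cannot keep up with the write rate.
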
2024-11-20T17:22:15,756 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3030edb9ba565d9149afc5328873a8ef:
2024-11-20T17:22:15,756 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3030edb9ba565d9149afc5328873a8ef:
2024-11-20T17:22:15,756 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef., storeName=3030edb9ba565d9149afc5328873a8ef/B, priority=12, startTime=1732123335327; duration=0sec
2024-11-20T17:22:15,756 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef., storeName=3030edb9ba565d9149afc5328873a8ef/A, priority=12, startTime=1732123335327; duration=0sec
2024-11-20T17:22:15,756 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-20T17:22:15,756 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3030edb9ba565d9149afc5328873a8ef:B
2024-11-20T17:22:15,756 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T17:22:15,756 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3030edb9ba565d9149afc5328873a8ef:A
2024-11-20T17:22:15,756 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking
2024-11-20T17:22:15,758 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio
2024-11-20T17:22:15,758 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 3030edb9ba565d9149afc5328873a8ef/C is initiating minor compaction (all files)
2024-11-20T17:22:15,758 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3030edb9ba565d9149afc5328873a8ef/C in TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.
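
The ExploringCompactionPolicy entry above reports a 4-file, 49014-byte selection for store C, chosen after weighing 3 candidate permutations. Roughly, a permutation only stays eligible if every file in it is no larger than the compaction ratio (hbase.hstore.compaction.ratio, 1.2 by default) times the combined size of the other files. A small sketch of that per-file check, run against an assumed byte breakdown of the selection (estimated from the 12.3 K / 11.9 K store-file sizes logged just below):

// Illustrative check, not the HBase implementation: the "in ratio" rule used by the
// exploring compaction policy. The byte breakdown of the 49014-byte selection is an
// estimate derived from the 12.3 K / 11.9 K sizes in the surrounding log entries.
public class InRatioSketch {
  static boolean filesInRatio(long[] sizes, double ratio) {
    long total = 0;
    for (long s : sizes) {
      total += s;
    }
    for (long s : sizes) {
      // each file must not dwarf the rest of the selection
      if (s > (total - s) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    long[] sizes = {12597, 12139, 12139, 12139};  // ~12.3 K + 3 x ~11.9 K = 49014 bytes (assumed split)
    System.out.println("4-file selection in ratio (1.2): " + filesInRatio(sizes, 1.2));
  }
}

All four files passing such a check is consistent with the "Completed compaction of 4 (all) file(s)" entries elsewhere in this stretch of the log.
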
2024-11-20T17:22:15,758 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/bfce8245240a46759e4a1757b339a406, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/e6e877b0f3574ac3bd3defe0ad0632cd, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/c1b7da8922534af9a96962b5c5550afd, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/15ee6bfdb8e2472ebdd9e124482bbc9a] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp, totalSize=47.9 K 2024-11-20T17:22:15,758 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting bfce8245240a46759e4a1757b339a406, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732123330573 2024-11-20T17:22:15,759 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting e6e877b0f3574ac3bd3defe0ad0632cd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1732123332305 2024-11-20T17:22:15,759 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting c1b7da8922534af9a96962b5c5550afd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732123332329 2024-11-20T17:22:15,759 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 15ee6bfdb8e2472ebdd9e124482bbc9a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732123334451 2024-11-20T17:22:15,767 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3030edb9ba565d9149afc5328873a8ef#C#compaction#250 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:22:15,768 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/ac6a510efe1248379973afcd0271aa6f is 50, key is test_row_0/C:col10/1732123334456/Put/seqid=0 2024-11-20T17:22:15,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742118_1294 (size=12697) 2024-11-20T17:22:15,823 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/312ce682b32d404d8371778f07ef08bc 2024-11-20T17:22:15,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/93d3ccc9248e4e28a14caa9a51469e83 is 50, key is test_row_0/B:col10/1732123334473/Put/seqid=0 2024-11-20T17:22:15,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742119_1295 (size=12301) 2024-11-20T17:22:15,835 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/93d3ccc9248e4e28a14caa9a51469e83 2024-11-20T17:22:15,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/8e66b5ad91db425aa5665c35f20455af is 50, key is test_row_0/C:col10/1732123334473/Put/seqid=0 2024-11-20T17:22:15,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742120_1296 (size=12301) 2024-11-20T17:22:15,907 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:15,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123395906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:15,908 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:15,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123395906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:15,908 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:15,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123395907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:15,909 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:15,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123395908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:15,910 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:15,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123395908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:16,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T17:22:16,177 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/ac6a510efe1248379973afcd0271aa6f as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/ac6a510efe1248379973afcd0271aa6f 2024-11-20T17:22:16,182 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3030edb9ba565d9149afc5328873a8ef/C of 3030edb9ba565d9149afc5328873a8ef into ac6a510efe1248379973afcd0271aa6f(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
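
While flushes and compactions like the one just completed catch up, the handlers keep rejecting Mutate calls with RegionTooBusyException. The stock HBase client treats this as a retriable error and backs off on its own (governed by hbase.client.retries.number and hbase.client.pause); the sketch below only makes that loop explicit for a hypothetical caller whose client-side retries are tuned down. The table name and column family match the test, but the row, qualifier, value, and backoff numbers are illustrative, not taken from the run.

// Minimal sketch (assumed row/value/backoff values): explicit handling of the
// RegionTooBusyException storms seen in this log. The standard client already retries
// this exception internally, so a loop like this mainly matters when
// hbase.client.retries.number has been lowered; depending on settings the exception
// may also surface wrapped in a retries-exhausted exception rather than directly.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break;                                  // accepted once the memstore has drained
        } catch (RegionTooBusyException e) {
          if (attempt >= 10) {
            throw e;                              // give up after a bounded number of attempts
          }
          Thread.sleep(backoffMs);                // let flushes/compactions catch up
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
    }
  }
}

A real caller would usually just raise hbase.client.pause or hbase.client.retries.number instead of hand-rolling this loop.
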
2024-11-20T17:22:16,182 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:16,182 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef., storeName=3030edb9ba565d9149afc5328873a8ef/C, priority=12, startTime=1732123335327; duration=0sec 2024-11-20T17:22:16,182 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:16,182 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3030edb9ba565d9149afc5328873a8ef:C 2024-11-20T17:22:16,211 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:16,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123396210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:16,211 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:16,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123396210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:16,211 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:16,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123396210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:16,213 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:16,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123396211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:16,213 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:16,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123396212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:16,247 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/8e66b5ad91db425aa5665c35f20455af 2024-11-20T17:22:16,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/312ce682b32d404d8371778f07ef08bc as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/312ce682b32d404d8371778f07ef08bc 2024-11-20T17:22:16,256 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/312ce682b32d404d8371778f07ef08bc, entries=150, sequenceid=274, filesize=12.0 K 2024-11-20T17:22:16,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/93d3ccc9248e4e28a14caa9a51469e83 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/93d3ccc9248e4e28a14caa9a51469e83 2024-11-20T17:22:16,260 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/93d3ccc9248e4e28a14caa9a51469e83, entries=150, sequenceid=274, filesize=12.0 K 2024-11-20T17:22:16,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/8e66b5ad91db425aa5665c35f20455af as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/8e66b5ad91db425aa5665c35f20455af 2024-11-20T17:22:16,264 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/8e66b5ad91db425aa5665c35f20455af, entries=150, sequenceid=274, filesize=12.0 K 2024-11-20T17:22:16,265 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 3030edb9ba565d9149afc5328873a8ef in 852ms, sequenceid=274, compaction requested=false 2024-11-20T17:22:16,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:16,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:16,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-11-20T17:22:16,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-11-20T17:22:16,268 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-11-20T17:22:16,268 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3110 sec 2024-11-20T17:22:16,269 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 1.3150 sec 2024-11-20T17:22:16,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 3030edb9ba565d9149afc5328873a8ef 2024-11-20T17:22:16,713 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3030edb9ba565d9149afc5328873a8ef 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T17:22:16,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=A 2024-11-20T17:22:16,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:16,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=B 2024-11-20T17:22:16,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:16,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
3030edb9ba565d9149afc5328873a8ef, store=C 2024-11-20T17:22:16,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:16,718 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/a83e513ecb0f4d36997637f3d9b79dec is 50, key is test_row_0/A:col10/1732123335598/Put/seqid=0 2024-11-20T17:22:16,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742121_1297 (size=12301) 2024-11-20T17:22:16,759 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:16,759 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:16,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123396728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:16,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123396729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:16,761 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:16,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123396759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:16,762 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:16,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123396759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:16,762 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:16,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123396759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:16,861 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:16,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123396860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:16,862 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:16,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123396860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:16,864 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:16,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123396863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:16,864 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:16,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123396863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:16,864 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:16,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123396863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:17,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T17:22:17,058 INFO [Thread-1128 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-11-20T17:22:17,059 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:22:17,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees 2024-11-20T17:22:17,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T17:22:17,061 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:22:17,062 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:22:17,062 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:22:17,063 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:17,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123397062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:17,065 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:17,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123397064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:17,068 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:17,068 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:17,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123397065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:17,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123397066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:17,068 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:17,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123397066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:17,123 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/a83e513ecb0f4d36997637f3d9b79dec 2024-11-20T17:22:17,130 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/7e924788b3184d84867bd603647d0be7 is 50, key is test_row_0/B:col10/1732123335598/Put/seqid=0 2024-11-20T17:22:17,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742122_1298 (size=12301) 2024-11-20T17:22:17,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T17:22:17,213 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:17,214 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-20T17:22:17,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 
2024-11-20T17:22:17,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. as already flushing 2024-11-20T17:22:17,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:17,214 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:17,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:22:17,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:17,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T17:22:17,366 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:17,366 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-20T17:22:17,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 
2024-11-20T17:22:17,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. as already flushing 2024-11-20T17:22:17,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:17,367 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:17,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:22:17,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:17,368 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:17,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123397367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:17,369 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:17,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123397368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:17,369 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:17,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123397369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:17,369 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:17,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123397369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:17,370 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:17,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123397370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:17,519 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:17,520 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-20T17:22:17,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:17,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. as already flushing 2024-11-20T17:22:17,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:17,520 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:17,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:17,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:17,535 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/7e924788b3184d84867bd603647d0be7 2024-11-20T17:22:17,543 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/4c791a6687d14f45b3432356dcb220b9 is 50, key is test_row_0/C:col10/1732123335598/Put/seqid=0 2024-11-20T17:22:17,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742123_1299 (size=12301) 2024-11-20T17:22:17,547 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/4c791a6687d14f45b3432356dcb220b9 2024-11-20T17:22:17,563 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/a83e513ecb0f4d36997637f3d9b79dec as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/a83e513ecb0f4d36997637f3d9b79dec 2024-11-20T17:22:17,567 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/a83e513ecb0f4d36997637f3d9b79dec, entries=150, sequenceid=290, filesize=12.0 K 2024-11-20T17:22:17,568 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/7e924788b3184d84867bd603647d0be7 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/7e924788b3184d84867bd603647d0be7 2024-11-20T17:22:17,571 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/7e924788b3184d84867bd603647d0be7, entries=150, sequenceid=290, filesize=12.0 K 2024-11-20T17:22:17,572 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/4c791a6687d14f45b3432356dcb220b9 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/4c791a6687d14f45b3432356dcb220b9 2024-11-20T17:22:17,577 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/4c791a6687d14f45b3432356dcb220b9, entries=150, sequenceid=290, filesize=12.0 K 2024-11-20T17:22:17,577 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=147.60 KB/151140 for 3030edb9ba565d9149afc5328873a8ef in 864ms, sequenceid=290, compaction requested=true 2024-11-20T17:22:17,578 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:17,578 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3030edb9ba565d9149afc5328873a8ef:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:22:17,578 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:17,578 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:22:17,578 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:22:17,578 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3030edb9ba565d9149afc5328873a8ef:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:22:17,578 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:17,578 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store 3030edb9ba565d9149afc5328873a8ef:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:22:17,578 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:22:17,579 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37299 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:22:17,579 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): 3030edb9ba565d9149afc5328873a8ef/A is initiating minor compaction (all files) 2024-11-20T17:22:17,579 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3030edb9ba565d9149afc5328873a8ef/A in TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:17,580 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37299 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:22:17,580 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/8f2de56c102c4634a7978e6e0e0a95ff, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/312ce682b32d404d8371778f07ef08bc, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/a83e513ecb0f4d36997637f3d9b79dec] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp, totalSize=36.4 K 2024-11-20T17:22:17,580 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 3030edb9ba565d9149afc5328873a8ef/B is initiating minor compaction (all files) 2024-11-20T17:22:17,580 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3030edb9ba565d9149afc5328873a8ef/B in TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 
2024-11-20T17:22:17,580 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/e52c49ca0c664ef6a354a38f92ed2807, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/93d3ccc9248e4e28a14caa9a51469e83, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/7e924788b3184d84867bd603647d0be7] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp, totalSize=36.4 K 2024-11-20T17:22:17,580 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8f2de56c102c4634a7978e6e0e0a95ff, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732123334451 2024-11-20T17:22:17,580 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting e52c49ca0c664ef6a354a38f92ed2807, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732123334451 2024-11-20T17:22:17,580 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 312ce682b32d404d8371778f07ef08bc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1732123334473 2024-11-20T17:22:17,581 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 93d3ccc9248e4e28a14caa9a51469e83, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1732123334473 2024-11-20T17:22:17,581 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting a83e513ecb0f4d36997637f3d9b79dec, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732123335598 2024-11-20T17:22:17,581 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 7e924788b3184d84867bd603647d0be7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732123335598 2024-11-20T17:22:17,589 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3030edb9ba565d9149afc5328873a8ef#B#compaction#256 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:22:17,589 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3030edb9ba565d9149afc5328873a8ef#A#compaction#257 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:22:17,589 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/97577bba66994e8e9a3770afdd8df2fe is 50, key is test_row_0/B:col10/1732123335598/Put/seqid=0 2024-11-20T17:22:17,590 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/450dd80eaa384b5f961aa8ffc3ff82a0 is 50, key is test_row_0/A:col10/1732123335598/Put/seqid=0 2024-11-20T17:22:17,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742124_1300 (size=12949) 2024-11-20T17:22:17,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742125_1301 (size=12949) 2024-11-20T17:22:17,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T17:22:17,672 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:17,673 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-20T17:22:17,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 
2024-11-20T17:22:17,673 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2837): Flushing 3030edb9ba565d9149afc5328873a8ef 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T17:22:17,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=A 2024-11-20T17:22:17,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:17,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=B 2024-11-20T17:22:17,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:17,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=C 2024-11-20T17:22:17,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:17,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/bd54be9b9fb844ceb256f2a47e8e23d8 is 50, key is test_row_0/A:col10/1732123336731/Put/seqid=0 2024-11-20T17:22:17,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742126_1302 (size=12301) 2024-11-20T17:22:17,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 3030edb9ba565d9149afc5328873a8ef 2024-11-20T17:22:17,873 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. as already flushing 2024-11-20T17:22:17,881 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:17,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123397877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:17,881 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:17,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123397878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:17,881 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:17,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123397878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:17,883 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:17,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123397881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:17,884 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:17,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123397882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:17,984 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:17,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123397982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:17,984 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:17,984 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:17,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123397983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:17,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123397983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:17,986 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:17,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123397985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:17,987 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:17,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123397985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:18,013 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/450dd80eaa384b5f961aa8ffc3ff82a0 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/450dd80eaa384b5f961aa8ffc3ff82a0 2024-11-20T17:22:18,013 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/97577bba66994e8e9a3770afdd8df2fe as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/97577bba66994e8e9a3770afdd8df2fe 2024-11-20T17:22:18,017 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3030edb9ba565d9149afc5328873a8ef/A of 3030edb9ba565d9149afc5328873a8ef into 450dd80eaa384b5f961aa8ffc3ff82a0(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:22:18,017 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3030edb9ba565d9149afc5328873a8ef/B of 3030edb9ba565d9149afc5328873a8ef into 97577bba66994e8e9a3770afdd8df2fe(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:22:18,017 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:18,017 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:18,017 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef., storeName=3030edb9ba565d9149afc5328873a8ef/A, priority=13, startTime=1732123337578; duration=0sec 2024-11-20T17:22:18,017 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef., storeName=3030edb9ba565d9149afc5328873a8ef/B, priority=13, startTime=1732123337578; duration=0sec 2024-11-20T17:22:18,018 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:22:18,018 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:22:18,018 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3030edb9ba565d9149afc5328873a8ef:A 2024-11-20T17:22:18,018 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3030edb9ba565d9149afc5328873a8ef:B 2024-11-20T17:22:18,018 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:22:18,019 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37299 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:22:18,019 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): 3030edb9ba565d9149afc5328873a8ef/C is initiating minor compaction (all files) 2024-11-20T17:22:18,019 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3030edb9ba565d9149afc5328873a8ef/C in TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 
2024-11-20T17:22:18,019 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/ac6a510efe1248379973afcd0271aa6f, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/8e66b5ad91db425aa5665c35f20455af, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/4c791a6687d14f45b3432356dcb220b9] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp, totalSize=36.4 K 2024-11-20T17:22:18,019 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting ac6a510efe1248379973afcd0271aa6f, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732123334451 2024-11-20T17:22:18,019 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8e66b5ad91db425aa5665c35f20455af, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1732123334473 2024-11-20T17:22:18,020 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4c791a6687d14f45b3432356dcb220b9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732123335598 2024-11-20T17:22:18,028 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3030edb9ba565d9149afc5328873a8ef#C#compaction#259 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:22:18,029 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/d7128cd0019148cfb4a593a31ad5e795 is 50, key is test_row_0/C:col10/1732123335598/Put/seqid=0 2024-11-20T17:22:18,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742127_1303 (size=12949) 2024-11-20T17:22:18,082 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/bd54be9b9fb844ceb256f2a47e8e23d8 2024-11-20T17:22:18,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/d1a0312fa0eb47f4acc6e02ec71bb81e is 50, key is test_row_0/B:col10/1732123336731/Put/seqid=0 2024-11-20T17:22:18,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742128_1304 (size=12301) 2024-11-20T17:22:18,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T17:22:18,186 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:18,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123398185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:18,187 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:18,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123398186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:18,187 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:18,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123398186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:18,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:18,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123398188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:18,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:18,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123398188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:18,439 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/d7128cd0019148cfb4a593a31ad5e795 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/d7128cd0019148cfb4a593a31ad5e795 2024-11-20T17:22:18,446 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3030edb9ba565d9149afc5328873a8ef/C of 3030edb9ba565d9149afc5328873a8ef into d7128cd0019148cfb4a593a31ad5e795(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:22:18,446 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:18,446 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef., storeName=3030edb9ba565d9149afc5328873a8ef/C, priority=13, startTime=1732123337578; duration=0sec 2024-11-20T17:22:18,446 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:18,446 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3030edb9ba565d9149afc5328873a8ef:C 2024-11-20T17:22:18,489 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:18,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123398487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:18,490 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:18,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123398489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:18,492 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:18,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123398490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:18,492 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:18,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123398492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:18,493 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:18,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123398492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:18,494 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/d1a0312fa0eb47f4acc6e02ec71bb81e 2024-11-20T17:22:18,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/b2a5e999708c4883863b71c00881491e is 50, key is test_row_0/C:col10/1732123336731/Put/seqid=0 2024-11-20T17:22:18,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742129_1305 (size=12301) 2024-11-20T17:22:18,506 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/b2a5e999708c4883863b71c00881491e 2024-11-20T17:22:18,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/bd54be9b9fb844ceb256f2a47e8e23d8 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/bd54be9b9fb844ceb256f2a47e8e23d8 2024-11-20T17:22:18,515 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/bd54be9b9fb844ceb256f2a47e8e23d8, entries=150, sequenceid=315, filesize=12.0 K 2024-11-20T17:22:18,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/d1a0312fa0eb47f4acc6e02ec71bb81e as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/d1a0312fa0eb47f4acc6e02ec71bb81e 2024-11-20T17:22:18,519 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/d1a0312fa0eb47f4acc6e02ec71bb81e, entries=150, sequenceid=315, filesize=12.0 K 2024-11-20T17:22:18,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/b2a5e999708c4883863b71c00881491e as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/b2a5e999708c4883863b71c00881491e 2024-11-20T17:22:18,523 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/b2a5e999708c4883863b71c00881491e, entries=150, sequenceid=315, filesize=12.0 K 2024-11-20T17:22:18,524 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 3030edb9ba565d9149afc5328873a8ef in 851ms, sequenceid=315, compaction requested=false 2024-11-20T17:22:18,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2538): Flush status journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:18,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 
2024-11-20T17:22:18,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=84 2024-11-20T17:22:18,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=84 2024-11-20T17:22:18,527 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-11-20T17:22:18,527 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4630 sec 2024-11-20T17:22:18,529 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees in 1.4690 sec 2024-11-20T17:22:18,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 3030edb9ba565d9149afc5328873a8ef 2024-11-20T17:22:18,994 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3030edb9ba565d9149afc5328873a8ef 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T17:22:18,994 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=A 2024-11-20T17:22:18,995 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:18,995 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=B 2024-11-20T17:22:18,995 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:18,995 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=C 2024-11-20T17:22:18,995 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:18,999 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/32b79296f7064815a2bc47b135b262cd is 50, key is test_row_0/A:col10/1732123338993/Put/seqid=0 2024-11-20T17:22:19,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742130_1306 (size=12297) 2024-11-20T17:22:19,012 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:19,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123399009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:19,013 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:19,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123399009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:19,013 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:19,013 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:19,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123399010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:19,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123399010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:19,014 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:19,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123399012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:19,115 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:19,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123399113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:19,116 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:19,116 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:19,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123399114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:19,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123399114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:19,116 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:19,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123399114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:19,116 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:19,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123399115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:19,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T17:22:19,166 INFO [Thread-1128 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-11-20T17:22:19,167 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:22:19,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees 2024-11-20T17:22:19,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T17:22:19,168 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:22:19,169 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:22:19,169 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:22:19,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=85 2024-11-20T17:22:19,317 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:19,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123399317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:19,318 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:19,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123399317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:19,320 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:19,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123399318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:19,320 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:19,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123399318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:19,320 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:19,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123399318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:19,320 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:19,321 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T17:22:19,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:19,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. as already flushing 2024-11-20T17:22:19,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:19,321 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:22:19,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:19,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:19,405 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/32b79296f7064815a2bc47b135b262cd 2024-11-20T17:22:19,412 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/320978641a1c4ae3ad5afcdf4db18250 is 50, key is test_row_0/B:col10/1732123338993/Put/seqid=0 2024-11-20T17:22:19,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742131_1307 (size=9857) 2024-11-20T17:22:19,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T17:22:19,473 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:19,474 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T17:22:19,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:19,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. as already flushing 2024-11-20T17:22:19,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:19,474 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:19,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:19,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:19,620 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:19,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123399620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:19,623 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:19,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123399621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:19,623 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:19,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123399621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:19,623 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:19,623 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:19,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123399621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:19,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123399622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:19,626 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:19,626 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T17:22:19,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:19,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. as already flushing 2024-11-20T17:22:19,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:19,627 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:19,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:19,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:19,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T17:22:19,779 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:19,779 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T17:22:19,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:19,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. as already flushing 2024-11-20T17:22:19,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:19,780 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:22:19,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:19,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:19,816 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/320978641a1c4ae3ad5afcdf4db18250 2024-11-20T17:22:19,824 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/ceadff24824047829b53e3e192fe71cf is 50, key is test_row_0/C:col10/1732123338993/Put/seqid=0 2024-11-20T17:22:19,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742132_1308 (size=9857) 2024-11-20T17:22:19,932 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:19,932 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T17:22:19,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:19,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. as already flushing 2024-11-20T17:22:19,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:19,933 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
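The RegionTooBusyException warnings above come from HRegion.checkResources rejecting writes while the region's memstore is over its blocking limit (512.0 K in this run; presumably hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier in the test configuration). Below is a minimal client-side sketch of retrying a put under that condition, assuming the standard HBase client API: the table, row, family and column names are taken from the log, while the class name, retry count and backoff values are illustrative, and depending on client retry settings the exception may surface wrapped in a RetriesExhaustedException rather than directly.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100L;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put); // fails while the memstore is over its blocking limit
          break;
        } catch (RegionTooBusyException e) {
          // Same condition as the "Over memstore limit=512.0 K" warnings above:
          // back off and let the region server finish flushing before retrying.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}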
2024-11-20T17:22:19,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:19,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:20,085 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:20,085 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T17:22:20,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:20,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. as already flushing 2024-11-20T17:22:20,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:20,086 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:20,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:20,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:20,126 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:20,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123400124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:20,126 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:20,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123400126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:20,129 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:20,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123400128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:20,129 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:20,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123400128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:20,129 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:20,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123400128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:20,229 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/ceadff24824047829b53e3e192fe71cf 2024-11-20T17:22:20,234 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/32b79296f7064815a2bc47b135b262cd as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/32b79296f7064815a2bc47b135b262cd 2024-11-20T17:22:20,237 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/32b79296f7064815a2bc47b135b262cd, entries=150, sequenceid=331, filesize=12.0 K 2024-11-20T17:22:20,238 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:20,238 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/320978641a1c4ae3ad5afcdf4db18250 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/320978641a1c4ae3ad5afcdf4db18250 2024-11-20T17:22:20,238 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T17:22:20,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:20,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. as already flushing 2024-11-20T17:22:20,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:20,239 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:20,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:22:20,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
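The repeated pid=86 failures above are the server side of a table flush request: the master's flush procedure keeps re-dispatching FlushRegionCallable while the region reports it is already flushing, until the in-flight flush (the MemStoreFlusher.0 entries) completes. A minimal sketch of issuing such a flush through the Admin API follows, assuming the standard HBase client classes; the surrounding setup and class name are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; the master dispatches
      // flush callables to the region servers, which is what keeps being retried
      // above (pid=86) while the region is still busy with an earlier flush.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}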
2024-11-20T17:22:20,242 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/320978641a1c4ae3ad5afcdf4db18250, entries=100, sequenceid=331, filesize=9.6 K 2024-11-20T17:22:20,243 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/ceadff24824047829b53e3e192fe71cf as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/ceadff24824047829b53e3e192fe71cf 2024-11-20T17:22:20,246 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/ceadff24824047829b53e3e192fe71cf, entries=100, sequenceid=331, filesize=9.6 K 2024-11-20T17:22:20,247 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 3030edb9ba565d9149afc5328873a8ef in 1253ms, sequenceid=331, compaction requested=true 2024-11-20T17:22:20,247 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:20,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3030edb9ba565d9149afc5328873a8ef:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:22:20,248 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:20,248 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:22:20,248 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3030edb9ba565d9149afc5328873a8ef:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:22:20,248 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:20,248 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3030edb9ba565d9149afc5328873a8ef:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:22:20,248 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:22:20,248 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:22:20,249 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37547 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:22:20,249 DEBUG 
[RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35107 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:22:20,249 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 3030edb9ba565d9149afc5328873a8ef/B is initiating minor compaction (all files) 2024-11-20T17:22:20,249 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): 3030edb9ba565d9149afc5328873a8ef/A is initiating minor compaction (all files) 2024-11-20T17:22:20,249 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3030edb9ba565d9149afc5328873a8ef/B in TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:20,249 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3030edb9ba565d9149afc5328873a8ef/A in TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:20,249 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/97577bba66994e8e9a3770afdd8df2fe, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/d1a0312fa0eb47f4acc6e02ec71bb81e, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/320978641a1c4ae3ad5afcdf4db18250] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp, totalSize=34.3 K 2024-11-20T17:22:20,249 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/450dd80eaa384b5f961aa8ffc3ff82a0, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/bd54be9b9fb844ceb256f2a47e8e23d8, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/32b79296f7064815a2bc47b135b262cd] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp, totalSize=36.7 K 2024-11-20T17:22:20,249 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 97577bba66994e8e9a3770afdd8df2fe, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732123335598 2024-11-20T17:22:20,249 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 450dd80eaa384b5f961aa8ffc3ff82a0, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732123335598 2024-11-20T17:22:20,250 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting d1a0312fa0eb47f4acc6e02ec71bb81e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, 
seqNum=315, earliestPutTs=1732123336723 2024-11-20T17:22:20,250 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting bd54be9b9fb844ceb256f2a47e8e23d8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1732123336723 2024-11-20T17:22:20,250 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 320978641a1c4ae3ad5afcdf4db18250, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1732123337876 2024-11-20T17:22:20,250 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 32b79296f7064815a2bc47b135b262cd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1732123337876 2024-11-20T17:22:20,266 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3030edb9ba565d9149afc5328873a8ef#B#compaction#265 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:22:20,266 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/67336f9d6a8d4c4cb6a8a56af4ec7ac2 is 50, key is test_row_0/B:col10/1732123338993/Put/seqid=0 2024-11-20T17:22:20,266 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3030edb9ba565d9149afc5328873a8ef#A#compaction#266 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:22:20,267 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/26d4626284a84e9b86460ca2c2838201 is 50, key is test_row_0/A:col10/1732123338993/Put/seqid=0 2024-11-20T17:22:20,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T17:22:20,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742133_1309 (size=13051) 2024-11-20T17:22:20,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742134_1310 (size=13051) 2024-11-20T17:22:20,295 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/67336f9d6a8d4c4cb6a8a56af4ec7ac2 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/67336f9d6a8d4c4cb6a8a56af4ec7ac2 2024-11-20T17:22:20,299 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3030edb9ba565d9149afc5328873a8ef/B of 3030edb9ba565d9149afc5328873a8ef into 67336f9d6a8d4c4cb6a8a56af4ec7ac2(size=12.7 K), total size for store is 12.7 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:22:20,299 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:20,299 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef., storeName=3030edb9ba565d9149afc5328873a8ef/B, priority=13, startTime=1732123340248; duration=0sec 2024-11-20T17:22:20,300 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:22:20,300 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3030edb9ba565d9149afc5328873a8ef:B 2024-11-20T17:22:20,300 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:22:20,301 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35107 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:22:20,301 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 3030edb9ba565d9149afc5328873a8ef/C is initiating minor compaction (all files) 2024-11-20T17:22:20,301 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3030edb9ba565d9149afc5328873a8ef/C in TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 
2024-11-20T17:22:20,301 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/d7128cd0019148cfb4a593a31ad5e795, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/b2a5e999708c4883863b71c00881491e, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/ceadff24824047829b53e3e192fe71cf] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp, totalSize=34.3 K 2024-11-20T17:22:20,301 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting d7128cd0019148cfb4a593a31ad5e795, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732123335598 2024-11-20T17:22:20,301 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting b2a5e999708c4883863b71c00881491e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1732123336723 2024-11-20T17:22:20,302 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting ceadff24824047829b53e3e192fe71cf, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1732123337876 2024-11-20T17:22:20,308 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3030edb9ba565d9149afc5328873a8ef#C#compaction#267 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:22:20,309 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/94799cdcd6894d738694b4866452f6fd is 50, key is test_row_0/C:col10/1732123338993/Put/seqid=0 2024-11-20T17:22:20,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742135_1311 (size=13051) 2024-11-20T17:22:20,319 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/94799cdcd6894d738694b4866452f6fd as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/94799cdcd6894d738694b4866452f6fd 2024-11-20T17:22:20,324 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3030edb9ba565d9149afc5328873a8ef/C of 3030edb9ba565d9149afc5328873a8ef into 94799cdcd6894d738694b4866452f6fd(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
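The compaction entries above (ExploringCompactionPolicy selecting 3 eligible files per store, PressureAwareThroughputController reporting a 50.00 MB/second limit) are governed by a few store-level settings. A minimal configuration sketch follows; the property names are the standard HBase keys as best as can be inferred here, and the values are illustrative rather than the ones this test actually uses.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of eligible store files before a minor compaction is selected;
    // the log shows compaction starting once 3 files per store are eligible.
    conf.setInt("hbase.hstore.compactionThreshold", 3);
    // Upper bound on the number of files merged in one compaction.
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Bounds used by the pressure-aware compaction throughput controller; the
    // "total limit is 50.00 MB/second" lines above correspond to the lower bound.
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    System.out.println("compactionThreshold=" + conf.getInt("hbase.hstore.compactionThreshold", -1));
  }
}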
2024-11-20T17:22:20,324 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:20,324 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef., storeName=3030edb9ba565d9149afc5328873a8ef/C, priority=13, startTime=1732123340248; duration=0sec 2024-11-20T17:22:20,324 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:20,324 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3030edb9ba565d9149afc5328873a8ef:C 2024-11-20T17:22:20,391 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:20,391 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T17:22:20,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:20,392 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2837): Flushing 3030edb9ba565d9149afc5328873a8ef 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T17:22:20,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=A 2024-11-20T17:22:20,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:20,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=B 2024-11-20T17:22:20,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:20,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=C 2024-11-20T17:22:20,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:20,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/441e88456997489d98e54422c1b2695f is 50, key is test_row_0/A:col10/1732123339009/Put/seqid=0 2024-11-20T17:22:20,401 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742136_1312 (size=12301) 2024-11-20T17:22:20,695 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/26d4626284a84e9b86460ca2c2838201 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/26d4626284a84e9b86460ca2c2838201 2024-11-20T17:22:20,700 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3030edb9ba565d9149afc5328873a8ef/A of 3030edb9ba565d9149afc5328873a8ef into 26d4626284a84e9b86460ca2c2838201(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:22:20,700 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:20,700 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef., storeName=3030edb9ba565d9149afc5328873a8ef/A, priority=13, startTime=1732123340247; duration=0sec 2024-11-20T17:22:20,700 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:20,700 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3030edb9ba565d9149afc5328873a8ef:A 2024-11-20T17:22:20,802 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/441e88456997489d98e54422c1b2695f 2024-11-20T17:22:20,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/9b9128b44fd84c88891889be9b109f41 is 50, key is test_row_0/B:col10/1732123339009/Put/seqid=0 2024-11-20T17:22:20,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742137_1313 (size=12301) 2024-11-20T17:22:20,814 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/9b9128b44fd84c88891889be9b109f41 2024-11-20T17:22:20,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/fbead36e3539434fba6ccb6661e74475 is 50, key is test_row_0/C:col10/1732123339009/Put/seqid=0 2024-11-20T17:22:20,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742138_1314 (size=12301) 2024-11-20T17:22:21,129 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. as already flushing 2024-11-20T17:22:21,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 3030edb9ba565d9149afc5328873a8ef 2024-11-20T17:22:21,140 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:21,140 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:21,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123401139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:21,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123401138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:21,141 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:21,141 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:21,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123401139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:21,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123401139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:21,141 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:21,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123401140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:21,234 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/fbead36e3539434fba6ccb6661e74475 2024-11-20T17:22:21,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/441e88456997489d98e54422c1b2695f as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/441e88456997489d98e54422c1b2695f 2024-11-20T17:22:21,243 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:21,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123401242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:21,243 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:21,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123401242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:21,243 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:21,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123401242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:21,245 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/441e88456997489d98e54422c1b2695f, entries=150, sequenceid=356, filesize=12.0 K 2024-11-20T17:22:21,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/9b9128b44fd84c88891889be9b109f41 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/9b9128b44fd84c88891889be9b109f41 2024-11-20T17:22:21,249 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/9b9128b44fd84c88891889be9b109f41, entries=150, sequenceid=356, filesize=12.0 K 2024-11-20T17:22:21,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/fbead36e3539434fba6ccb6661e74475 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/fbead36e3539434fba6ccb6661e74475 2024-11-20T17:22:21,254 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/fbead36e3539434fba6ccb6661e74475, entries=150, sequenceid=356, filesize=12.0 K 2024-11-20T17:22:21,255 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 3030edb9ba565d9149afc5328873a8ef in 863ms, sequenceid=356, compaction requested=false 2024-11-20T17:22:21,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2538): Flush status journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:21,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:21,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-11-20T17:22:21,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=86 2024-11-20T17:22:21,257 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-11-20T17:22:21,258 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0870 sec 2024-11-20T17:22:21,259 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees in 2.0910 sec 2024-11-20T17:22:21,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T17:22:21,272 INFO [Thread-1128 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-11-20T17:22:21,274 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:22:21,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees 2024-11-20T17:22:21,275 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:22:21,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-20T17:22:21,275 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:22:21,275 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, 
state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:22:21,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-20T17:22:21,427 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:21,428 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-20T17:22:21,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:21,428 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2837): Flushing 3030edb9ba565d9149afc5328873a8ef 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T17:22:21,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=A 2024-11-20T17:22:21,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:21,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=B 2024-11-20T17:22:21,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:21,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=C 2024-11-20T17:22:21,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:21,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/940ce672d102450ca6899932d486742f is 50, key is test_row_0/A:col10/1732123341139/Put/seqid=0 2024-11-20T17:22:21,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742139_1315 (size=12301) 2024-11-20T17:22:21,439 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=370 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/940ce672d102450ca6899932d486742f 2024-11-20T17:22:21,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(8581): 
Flush requested on 3030edb9ba565d9149afc5328873a8ef 2024-11-20T17:22:21,447 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. as already flushing 2024-11-20T17:22:21,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/8c6c66b6a009487b8f33cf9362419008 is 50, key is test_row_0/B:col10/1732123341139/Put/seqid=0 2024-11-20T17:22:21,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742140_1316 (size=12301) 2024-11-20T17:22:21,468 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:21,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123401465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:21,468 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:21,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123401466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:21,470 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:21,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123401467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:21,570 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:21,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123401569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:21,571 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:21,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123401569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:21,572 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:21,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123401571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:21,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-20T17:22:21,773 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:21,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123401773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:21,774 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:21,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123401773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:21,775 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:21,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123401774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:21,852 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=370 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/8c6c66b6a009487b8f33cf9362419008 2024-11-20T17:22:21,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/6f557ab89b0c43d7a475935d50f43471 is 50, key is test_row_0/C:col10/1732123341139/Put/seqid=0 2024-11-20T17:22:21,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742141_1317 (size=12301) 2024-11-20T17:22:21,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-20T17:22:22,076 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:22,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123402076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:22,077 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:22,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123402076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:22,078 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:22,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123402076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:22,265 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=370 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/6f557ab89b0c43d7a475935d50f43471 2024-11-20T17:22:22,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/940ce672d102450ca6899932d486742f as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/940ce672d102450ca6899932d486742f 2024-11-20T17:22:22,274 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/940ce672d102450ca6899932d486742f, entries=150, sequenceid=370, filesize=12.0 K 2024-11-20T17:22:22,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/8c6c66b6a009487b8f33cf9362419008 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/8c6c66b6a009487b8f33cf9362419008 2024-11-20T17:22:22,280 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/8c6c66b6a009487b8f33cf9362419008, entries=150, sequenceid=370, filesize=12.0 K 2024-11-20T17:22:22,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/6f557ab89b0c43d7a475935d50f43471 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/6f557ab89b0c43d7a475935d50f43471 2024-11-20T17:22:22,285 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/6f557ab89b0c43d7a475935d50f43471, entries=150, sequenceid=370, filesize=12.0 K 2024-11-20T17:22:22,285 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 3030edb9ba565d9149afc5328873a8ef in 857ms, sequenceid=370, compaction requested=true 2024-11-20T17:22:22,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:22,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 
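The repeated RegionTooBusyException entries above ("Over memstore limit=512.0 K") come from HRegion.checkResources, which rejects writes once a region's combined memstore size exceeds the per-region flush size multiplied by the blocking multiplier. The short Java sketch below only illustrates that arithmetic: the 128 K flush size is an assumed value chosen so the product matches the 512 K figure in the log, not a copy of the test's actual configuration (the flush size can also be set per table descriptor rather than in the site configuration).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimit {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed, test-style values: a 128 K per-region flush trigger and the default multiplier of 4.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        // Prints 524288 bytes, i.e. the 512.0 K limit reported in the RegionTooBusyException messages.
        System.out.println("writes rejected above " + blockingLimit + " bytes");
      }
    }

In this run the rejected writers simply retry: the same client connections (e.g. 172.17.0.2:40102) reappear with increasing callIds until the in-flight flushes (pid=86, pid=88) drain the memstore below the limit.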
2024-11-20T17:22:22,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-11-20T17:22:22,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-11-20T17:22:22,288 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-11-20T17:22:22,288 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0120 sec 2024-11-20T17:22:22,290 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees in 1.0150 sec 2024-11-20T17:22:22,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-20T17:22:22,379 INFO [Thread-1128 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-11-20T17:22:22,380 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:22:22,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees 2024-11-20T17:22:22,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-20T17:22:22,382 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:22:22,382 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:22:22,382 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:22:22,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-20T17:22:22,534 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:22,534 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-11-20T17:22:22,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 
2024-11-20T17:22:22,535 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2837): Flushing 3030edb9ba565d9149afc5328873a8ef 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T17:22:22,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=A 2024-11-20T17:22:22,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:22,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=B 2024-11-20T17:22:22,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:22,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=C 2024-11-20T17:22:22,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:22,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/0e5a7cdb48d24856b3a83c2816969e25 is 50, key is test_row_0/A:col10/1732123341465/Put/seqid=0 2024-11-20T17:22:22,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742142_1318 (size=12301) 2024-11-20T17:22:22,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 3030edb9ba565d9149afc5328873a8ef 2024-11-20T17:22:22,581 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. as already flushing 2024-11-20T17:22:22,591 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:22,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123402589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:22,592 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:22,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123402590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:22,593 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:22,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123402591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:22,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-20T17:22:22,693 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:22,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123402692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:22,694 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:22,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123402693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:22,695 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:22,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123402694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:22,896 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:22,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123402895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:22,897 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:22,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123402896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:22,899 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:22,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123402897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:22,944 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=393 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/0e5a7cdb48d24856b3a83c2816969e25 2024-11-20T17:22:22,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/56b320529b2f46709df2afb93360bf20 is 50, key is test_row_0/B:col10/1732123341465/Put/seqid=0 2024-11-20T17:22:22,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742143_1319 (size=12301) 2024-11-20T17:22:22,956 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=393 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/56b320529b2f46709df2afb93360bf20 2024-11-20T17:22:22,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/05658532bc6d43f9836e9bedf291740a is 50, key is test_row_0/C:col10/1732123341465/Put/seqid=0 2024-11-20T17:22:22,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-20T17:22:22,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742144_1320 (size=12301) 2024-11-20T17:22:22,988 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=393 (bloomFilter=true), 
to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/05658532bc6d43f9836e9bedf291740a 2024-11-20T17:22:22,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/0e5a7cdb48d24856b3a83c2816969e25 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/0e5a7cdb48d24856b3a83c2816969e25 2024-11-20T17:22:22,998 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/0e5a7cdb48d24856b3a83c2816969e25, entries=150, sequenceid=393, filesize=12.0 K 2024-11-20T17:22:22,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/56b320529b2f46709df2afb93360bf20 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/56b320529b2f46709df2afb93360bf20 2024-11-20T17:22:23,003 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/56b320529b2f46709df2afb93360bf20, entries=150, sequenceid=393, filesize=12.0 K 2024-11-20T17:22:23,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/05658532bc6d43f9836e9bedf291740a as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/05658532bc6d43f9836e9bedf291740a 2024-11-20T17:22:23,008 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/05658532bc6d43f9836e9bedf291740a, entries=150, sequenceid=393, filesize=12.0 K 2024-11-20T17:22:23,009 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 3030edb9ba565d9149afc5328873a8ef in 474ms, sequenceid=393, compaction requested=true 2024-11-20T17:22:23,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2538): Flush status journal for 3030edb9ba565d9149afc5328873a8ef: 
2024-11-20T17:22:23,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:23,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=90 2024-11-20T17:22:23,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=90 2024-11-20T17:22:23,012 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-11-20T17:22:23,012 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 628 msec 2024-11-20T17:22:23,014 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees in 633 msec 2024-11-20T17:22:23,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 3030edb9ba565d9149afc5328873a8ef 2024-11-20T17:22:23,152 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3030edb9ba565d9149afc5328873a8ef 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T17:22:23,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=A 2024-11-20T17:22:23,153 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:23,153 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=B 2024-11-20T17:22:23,153 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:23,153 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=C 2024-11-20T17:22:23,153 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:23,157 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/c2db3e9153434331a2373a5da93ad8da is 50, key is test_row_0/A:col10/1732123342590/Put/seqid=0 2024-11-20T17:22:23,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742145_1321 (size=12301) 2024-11-20T17:22:23,204 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:23,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123403201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:23,206 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:23,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123403203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:23,206 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:23,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123403204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:23,207 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:23,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123403204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:23,207 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:23,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123403204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:23,309 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:23,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123403307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:23,309 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:23,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123403307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:23,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:23,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123403308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:23,310 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:23,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123403307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:23,415 DEBUG [Thread-1133 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0d68f787 to 127.0.0.1:55266 2024-11-20T17:22:23,415 DEBUG [Thread-1129 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0eb04aeb to 127.0.0.1:55266 2024-11-20T17:22:23,415 DEBUG [Thread-1133 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:22:23,415 DEBUG [Thread-1129 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:22:23,416 DEBUG [Thread-1131 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6a0e9c8f to 127.0.0.1:55266 2024-11-20T17:22:23,416 DEBUG [Thread-1131 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:22:23,417 DEBUG [Thread-1137 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1730a60f to 127.0.0.1:55266 2024-11-20T17:22:23,417 DEBUG [Thread-1137 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:22:23,417 DEBUG [Thread-1135 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x10e6bf6a to 127.0.0.1:55266 2024-11-20T17:22:23,417 DEBUG [Thread-1135 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:22:23,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-20T17:22:23,485 INFO [Thread-1128 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 89 completed 2024-11-20T17:22:23,511 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:23,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123403511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:23,511 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:23,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123403511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:23,511 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:23,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123403511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:23,512 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:23,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123403512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:23,564 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=407 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/c2db3e9153434331a2373a5da93ad8da 2024-11-20T17:22:23,569 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/06bd19a17f4f42f897416684f25584e9 is 50, key is test_row_0/B:col10/1732123342590/Put/seqid=0 2024-11-20T17:22:23,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742146_1322 (size=12301) 2024-11-20T17:22:23,706 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:23,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40122 deadline: 1732123403705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:23,813 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:23,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123403813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:23,813 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:23,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123403813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:23,814 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:23,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123403814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:23,815 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:23,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123403815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:23,973 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=407 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/06bd19a17f4f42f897416684f25584e9 2024-11-20T17:22:23,979 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/10f1112a406c4f16813eff44785811ee is 50, key is test_row_0/C:col10/1732123342590/Put/seqid=0 2024-11-20T17:22:23,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742147_1323 (size=12301) 2024-11-20T17:22:24,315 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:24,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1732123404315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:24,316 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:24,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40156 deadline: 1732123404316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:24,317 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:24,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40134 deadline: 1732123404317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:24,317 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:24,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40104 deadline: 1732123404317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:24,383 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=407 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/10f1112a406c4f16813eff44785811ee 2024-11-20T17:22:24,386 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/c2db3e9153434331a2373a5da93ad8da as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/c2db3e9153434331a2373a5da93ad8da 2024-11-20T17:22:24,389 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/c2db3e9153434331a2373a5da93ad8da, entries=150, sequenceid=407, filesize=12.0 K 2024-11-20T17:22:24,390 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/06bd19a17f4f42f897416684f25584e9 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/06bd19a17f4f42f897416684f25584e9 2024-11-20T17:22:24,392 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/06bd19a17f4f42f897416684f25584e9, entries=150, sequenceid=407, filesize=12.0 K 2024-11-20T17:22:24,393 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/10f1112a406c4f16813eff44785811ee as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/10f1112a406c4f16813eff44785811ee 2024-11-20T17:22:24,395 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/10f1112a406c4f16813eff44785811ee, entries=150, sequenceid=407, filesize=12.0 K 2024-11-20T17:22:24,396 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=140.89 KB/144270 for 3030edb9ba565d9149afc5328873a8ef in 1244ms, sequenceid=407, compaction requested=true 2024-11-20T17:22:24,396 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:24,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3030edb9ba565d9149afc5328873a8ef:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:22:24,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:24,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3030edb9ba565d9149afc5328873a8ef:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:22:24,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:24,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3030edb9ba565d9149afc5328873a8ef:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:22:24,397 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-20T17:22:24,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:22:24,397 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-20T17:22:24,398 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 62255 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-20T17:22:24,398 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 62255 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-20T17:22:24,398 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 3030edb9ba565d9149afc5328873a8ef/B is initiating minor compaction (all files) 2024-11-20T17:22:24,398 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): 3030edb9ba565d9149afc5328873a8ef/A is initiating minor compaction (all files) 2024-11-20T17:22:24,398 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3030edb9ba565d9149afc5328873a8ef/A in TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 
2024-11-20T17:22:24,398 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3030edb9ba565d9149afc5328873a8ef/B in TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:24,398 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/26d4626284a84e9b86460ca2c2838201, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/441e88456997489d98e54422c1b2695f, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/940ce672d102450ca6899932d486742f, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/0e5a7cdb48d24856b3a83c2816969e25, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/c2db3e9153434331a2373a5da93ad8da] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp, totalSize=60.8 K 2024-11-20T17:22:24,398 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/67336f9d6a8d4c4cb6a8a56af4ec7ac2, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/9b9128b44fd84c88891889be9b109f41, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/8c6c66b6a009487b8f33cf9362419008, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/56b320529b2f46709df2afb93360bf20, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/06bd19a17f4f42f897416684f25584e9] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp, totalSize=60.8 K 2024-11-20T17:22:24,398 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 67336f9d6a8d4c4cb6a8a56af4ec7ac2, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1732123336727 2024-11-20T17:22:24,398 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 26d4626284a84e9b86460ca2c2838201, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1732123336727 2024-11-20T17:22:24,399 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 9b9128b44fd84c88891889be9b109f41, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1732123339008 2024-11-20T17:22:24,399 DEBUG 
[RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 441e88456997489d98e54422c1b2695f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1732123339008 2024-11-20T17:22:24,399 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 8c6c66b6a009487b8f33cf9362419008, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1732123341135 2024-11-20T17:22:24,399 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 940ce672d102450ca6899932d486742f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1732123341135 2024-11-20T17:22:24,399 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 56b320529b2f46709df2afb93360bf20, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=393, earliestPutTs=1732123341464 2024-11-20T17:22:24,399 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 06bd19a17f4f42f897416684f25584e9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=407, earliestPutTs=1732123342584 2024-11-20T17:22:24,399 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0e5a7cdb48d24856b3a83c2816969e25, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=393, earliestPutTs=1732123341464 2024-11-20T17:22:24,399 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting c2db3e9153434331a2373a5da93ad8da, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=407, earliestPutTs=1732123342584 2024-11-20T17:22:24,408 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3030edb9ba565d9149afc5328873a8ef#B#compaction#280 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:22:24,408 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3030edb9ba565d9149afc5328873a8ef#A#compaction#281 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:22:24,409 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/e68cd8e5b5484997a6b0c8a94da19ea0 is 50, key is test_row_0/B:col10/1732123342590/Put/seqid=0 2024-11-20T17:22:24,409 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/23a146b0e9344971a42e6e66032c910d is 50, key is test_row_0/A:col10/1732123342590/Put/seqid=0 2024-11-20T17:22:24,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742148_1324 (size=13221) 2024-11-20T17:22:24,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742149_1325 (size=13221) 2024-11-20T17:22:24,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 3030edb9ba565d9149afc5328873a8ef 2024-11-20T17:22:24,710 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3030edb9ba565d9149afc5328873a8ef 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T17:22:24,710 DEBUG [Thread-1124 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x43f04e0e to 127.0.0.1:55266 2024-11-20T17:22:24,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=A 2024-11-20T17:22:24,710 DEBUG [Thread-1124 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:22:24,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:24,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=B 2024-11-20T17:22:24,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:24,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=C 2024-11-20T17:22:24,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:24,714 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/621160b44d084a668d2e4620173b9c77 is 50, key is test_row_0/A:col10/1732123344709/Put/seqid=0 2024-11-20T17:22:24,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742150_1326 (size=12301) 2024-11-20T17:22:24,817 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/e68cd8e5b5484997a6b0c8a94da19ea0 as 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/e68cd8e5b5484997a6b0c8a94da19ea0 2024-11-20T17:22:24,817 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/23a146b0e9344971a42e6e66032c910d as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/23a146b0e9344971a42e6e66032c910d 2024-11-20T17:22:24,821 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 3030edb9ba565d9149afc5328873a8ef/B of 3030edb9ba565d9149afc5328873a8ef into e68cd8e5b5484997a6b0c8a94da19ea0(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:22:24,821 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 3030edb9ba565d9149afc5328873a8ef/A of 3030edb9ba565d9149afc5328873a8ef into 23a146b0e9344971a42e6e66032c910d(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:22:24,821 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:24,821 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:24,821 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef., storeName=3030edb9ba565d9149afc5328873a8ef/B, priority=11, startTime=1732123344396; duration=0sec 2024-11-20T17:22:24,821 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef., storeName=3030edb9ba565d9149afc5328873a8ef/A, priority=11, startTime=1732123344396; duration=0sec 2024-11-20T17:22:24,821 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:22:24,821 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3030edb9ba565d9149afc5328873a8ef:B 2024-11-20T17:22:24,821 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:22:24,821 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3030edb9ba565d9149afc5328873a8ef:A 2024-11-20T17:22:24,821 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-20T17:22:24,823 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has 
selected 5 files of size 62255 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-20T17:22:24,823 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 3030edb9ba565d9149afc5328873a8ef/C is initiating minor compaction (all files) 2024-11-20T17:22:24,823 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3030edb9ba565d9149afc5328873a8ef/C in TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:24,823 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/94799cdcd6894d738694b4866452f6fd, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/fbead36e3539434fba6ccb6661e74475, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/6f557ab89b0c43d7a475935d50f43471, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/05658532bc6d43f9836e9bedf291740a, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/10f1112a406c4f16813eff44785811ee] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp, totalSize=60.8 K 2024-11-20T17:22:24,823 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 94799cdcd6894d738694b4866452f6fd, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1732123336727 2024-11-20T17:22:24,824 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting fbead36e3539434fba6ccb6661e74475, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1732123339008 2024-11-20T17:22:24,824 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 6f557ab89b0c43d7a475935d50f43471, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1732123341135 2024-11-20T17:22:24,824 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 05658532bc6d43f9836e9bedf291740a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=393, earliestPutTs=1732123341464 2024-11-20T17:22:24,824 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 10f1112a406c4f16813eff44785811ee, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=407, earliestPutTs=1732123342584 2024-11-20T17:22:24,832 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3030edb9ba565d9149afc5328873a8ef#C#compaction#283 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:22:24,832 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/bb7a3c78bd05464ba7ef222243802037 is 50, key is test_row_0/C:col10/1732123342590/Put/seqid=0 2024-11-20T17:22:24,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742151_1327 (size=13221) 2024-11-20T17:22:25,118 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=432 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/621160b44d084a668d2e4620173b9c77 2024-11-20T17:22:25,124 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/2a944bb26cef4b25a3131b3dee07846d is 50, key is test_row_0/B:col10/1732123344709/Put/seqid=0 2024-11-20T17:22:25,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742152_1328 (size=12301) 2024-11-20T17:22:25,240 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/bb7a3c78bd05464ba7ef222243802037 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/bb7a3c78bd05464ba7ef222243802037 2024-11-20T17:22:25,244 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 3030edb9ba565d9149afc5328873a8ef/C of 3030edb9ba565d9149afc5328873a8ef into bb7a3c78bd05464ba7ef222243802037(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:22:25,244 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:25,244 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef., storeName=3030edb9ba565d9149afc5328873a8ef/C, priority=11, startTime=1732123344397; duration=0sec 2024-11-20T17:22:25,244 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:25,244 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3030edb9ba565d9149afc5328873a8ef:C 2024-11-20T17:22:25,319 DEBUG [Thread-1122 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x08d0caa5 to 127.0.0.1:55266 2024-11-20T17:22:25,319 DEBUG [Thread-1126 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x560ec309 to 127.0.0.1:55266 2024-11-20T17:22:25,319 DEBUG [Thread-1122 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:22:25,319 DEBUG [Thread-1126 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:22:25,325 DEBUG [Thread-1120 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0d296fed to 127.0.0.1:55266 2024-11-20T17:22:25,325 DEBUG [Thread-1120 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:22:25,326 DEBUG [Thread-1118 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6862e3ce to 127.0.0.1:55266 2024-11-20T17:22:25,326 DEBUG [Thread-1118 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:22:25,326 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-20T17:22:25,326 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 64 2024-11-20T17:22:25,326 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 73 2024-11-20T17:22:25,326 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 77 2024-11-20T17:22:25,326 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 69 2024-11-20T17:22:25,326 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 68 2024-11-20T17:22:25,326 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T17:22:25,326 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7693 2024-11-20T17:22:25,326 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7497 2024-11-20T17:22:25,326 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7690 2024-11-20T17:22:25,326 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7675 2024-11-20T17:22:25,326 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7478 2024-11-20T17:22:25,326 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T17:22:25,326 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T17:22:25,326 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3a569490 to 127.0.0.1:55266 2024-11-20T17:22:25,326 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:22:25,327 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-20T17:22:25,327 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T17:22:25,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=91, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T17:22:25,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-20T17:22:25,330 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123345329"}]},"ts":"1732123345329"} 2024-11-20T17:22:25,330 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T17:22:25,333 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T17:22:25,333 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T17:22:25,334 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=93, ppid=92, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3030edb9ba565d9149afc5328873a8ef, UNASSIGN}] 2024-11-20T17:22:25,334 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=93, ppid=92, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3030edb9ba565d9149afc5328873a8ef, UNASSIGN 2024-11-20T17:22:25,335 INFO [PEWorker-3 {}] 
assignment.RegionStateStore(202): pid=93 updating hbase:meta row=3030edb9ba565d9149afc5328873a8ef, regionState=CLOSING, regionLocation=d514dc944523,40121,1732123262111 2024-11-20T17:22:25,336 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T17:22:25,336 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE; CloseRegionProcedure 3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111}] 2024-11-20T17:22:25,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-20T17:22:25,487 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:25,487 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(124): Close 3030edb9ba565d9149afc5328873a8ef 2024-11-20T17:22:25,487 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T17:22:25,488 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1681): Closing 3030edb9ba565d9149afc5328873a8ef, disabling compactions & flushes 2024-11-20T17:22:25,488 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 
2024-11-20T17:22:25,528 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=432 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/2a944bb26cef4b25a3131b3dee07846d 2024-11-20T17:22:25,534 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/cf29cf175df1485391358796b9f68cfc is 50, key is test_row_0/C:col10/1732123344709/Put/seqid=0 2024-11-20T17:22:25,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742153_1329 (size=12301) 2024-11-20T17:22:25,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-20T17:22:25,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-20T17:22:25,938 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=432 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/cf29cf175df1485391358796b9f68cfc 2024-11-20T17:22:25,941 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/621160b44d084a668d2e4620173b9c77 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/621160b44d084a668d2e4620173b9c77 2024-11-20T17:22:25,944 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/621160b44d084a668d2e4620173b9c77, entries=150, sequenceid=432, filesize=12.0 K 2024-11-20T17:22:25,945 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/2a944bb26cef4b25a3131b3dee07846d as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/2a944bb26cef4b25a3131b3dee07846d 2024-11-20T17:22:25,948 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/2a944bb26cef4b25a3131b3dee07846d, entries=150, sequenceid=432, filesize=12.0 K 2024-11-20T17:22:25,948 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/cf29cf175df1485391358796b9f68cfc as 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/cf29cf175df1485391358796b9f68cfc 2024-11-20T17:22:25,951 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/cf29cf175df1485391358796b9f68cfc, entries=150, sequenceid=432, filesize=12.0 K 2024-11-20T17:22:25,952 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=26.84 KB/27480 for 3030edb9ba565d9149afc5328873a8ef in 1242ms, sequenceid=432, compaction requested=false 2024-11-20T17:22:25,952 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:25,952 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:25,952 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:25,952 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. after waiting 0 ms 2024-11-20T17:22:25,952 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 
2024-11-20T17:22:25,952 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(2837): Flushing 3030edb9ba565d9149afc5328873a8ef 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-20T17:22:25,952 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=A 2024-11-20T17:22:25,952 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:25,952 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=B 2024-11-20T17:22:25,952 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:25,952 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3030edb9ba565d9149afc5328873a8ef, store=C 2024-11-20T17:22:25,952 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:25,955 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/9d9215127bfb4657b3acaf3071284cbc is 50, key is test_row_0/A:col10/1732123345318/Put/seqid=0 2024-11-20T17:22:25,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742154_1330 (size=9857) 2024-11-20T17:22:26,359 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=442 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/9d9215127bfb4657b3acaf3071284cbc 2024-11-20T17:22:26,365 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/7fdbae986270445cabb4ef8fbf15fd8c is 50, key is test_row_0/B:col10/1732123345318/Put/seqid=0 2024-11-20T17:22:26,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742155_1331 (size=9857) 2024-11-20T17:22:26,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-20T17:22:26,769 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=442 (bloomFilter=true), 
to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/7fdbae986270445cabb4ef8fbf15fd8c 2024-11-20T17:22:26,774 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/c8e208cd5df64ad6ac55ac799d70460c is 50, key is test_row_0/C:col10/1732123345318/Put/seqid=0 2024-11-20T17:22:26,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742156_1332 (size=9857) 2024-11-20T17:22:27,178 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=442 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/c8e208cd5df64ad6ac55ac799d70460c 2024-11-20T17:22:27,183 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/A/9d9215127bfb4657b3acaf3071284cbc as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/9d9215127bfb4657b3acaf3071284cbc 2024-11-20T17:22:27,186 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/9d9215127bfb4657b3acaf3071284cbc, entries=100, sequenceid=442, filesize=9.6 K 2024-11-20T17:22:27,186 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/B/7fdbae986270445cabb4ef8fbf15fd8c as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/7fdbae986270445cabb4ef8fbf15fd8c 2024-11-20T17:22:27,189 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/7fdbae986270445cabb4ef8fbf15fd8c, entries=100, sequenceid=442, filesize=9.6 K 2024-11-20T17:22:27,190 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/.tmp/C/c8e208cd5df64ad6ac55ac799d70460c as 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/c8e208cd5df64ad6ac55ac799d70460c 2024-11-20T17:22:27,193 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/c8e208cd5df64ad6ac55ac799d70460c, entries=100, sequenceid=442, filesize=9.6 K 2024-11-20T17:22:27,194 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 3030edb9ba565d9149afc5328873a8ef in 1242ms, sequenceid=442, compaction requested=true 2024-11-20T17:22:27,194 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/bfe15609564444eba1c52b1cc8aac1ed, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/7cdd7bfbe0ff44b5845419e89bdca5b6, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/000e494816d64c35b8ab85b711051b74, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/85a16885936f42a8904ce63be9266992, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/61cfe4e8b51d424385b47f643d405430, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/5e0d48db29bb4926bd947974e124dd7c, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/b72494d93b794f41bd729617bdb61d13, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/521dbe6723004b4ebccdac27bef5fce6, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/68918d6413ed4175b2f7732441d0b095, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/72bbcb4b7a514722aa9797bb479111de, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/fe1462a7bd7846d78841d61a28e77e67, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/bf9c590c2c1242f99599db0c0e298f18, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/4d65492016a046a290bfda8e938ec0fd, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/3c208e9c3b80422582b7466f5a48f294, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/dc0bc211357f48bbafe1eb5438cd363c, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/8f2de56c102c4634a7978e6e0e0a95ff, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/ef107feb57124a8bb5c5e318923dce73, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/312ce682b32d404d8371778f07ef08bc, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/450dd80eaa384b5f961aa8ffc3ff82a0, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/a83e513ecb0f4d36997637f3d9b79dec, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/bd54be9b9fb844ceb256f2a47e8e23d8, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/26d4626284a84e9b86460ca2c2838201, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/32b79296f7064815a2bc47b135b262cd, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/441e88456997489d98e54422c1b2695f, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/940ce672d102450ca6899932d486742f, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/0e5a7cdb48d24856b3a83c2816969e25, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/c2db3e9153434331a2373a5da93ad8da] to archive 2024-11-20T17:22:27,195 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
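The store close above moves every compacted file of family A from the region's data directory to the matching location under the archive directory, as the subsequent "Archived from FileableStoreFile, <src> to <dst>" records show. A minimal sketch of that source-to-destination mapping, assuming only what these records show; the class name is hypothetical and this is not the HFileArchiver implementation.

// Illustrative sketch: the destination in each "Archived ... <src> to <dst>" pair is the
// source path with "/archive" inserted in front of "/data/default/".
public final class ArchiveLocation {
    static String archiveLocation(String storeFilePath) {
        return storeFilePath.replaceFirst("/data/default/", "/archive/data/default/");
    }

    public static void main(String[] args) {
        String src = "hdfs://localhost:41637/user/jenkins/test-data/<root>/data/default/TestAcidGuarantees/<region>/A/<hfile>";
        // -> hdfs://localhost:41637/user/jenkins/test-data/<root>/archive/data/default/TestAcidGuarantees/<region>/A/<hfile>
        System.out.println(archiveLocation(src));
    }
}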
2024-11-20T17:22:27,197 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/bfe15609564444eba1c52b1cc8aac1ed to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/bfe15609564444eba1c52b1cc8aac1ed 2024-11-20T17:22:27,198 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/7cdd7bfbe0ff44b5845419e89bdca5b6 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/7cdd7bfbe0ff44b5845419e89bdca5b6 2024-11-20T17:22:27,199 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/000e494816d64c35b8ab85b711051b74 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/000e494816d64c35b8ab85b711051b74 2024-11-20T17:22:27,200 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/85a16885936f42a8904ce63be9266992 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/85a16885936f42a8904ce63be9266992 2024-11-20T17:22:27,200 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/61cfe4e8b51d424385b47f643d405430 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/61cfe4e8b51d424385b47f643d405430 2024-11-20T17:22:27,201 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/5e0d48db29bb4926bd947974e124dd7c to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/5e0d48db29bb4926bd947974e124dd7c 2024-11-20T17:22:27,202 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/b72494d93b794f41bd729617bdb61d13 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/b72494d93b794f41bd729617bdb61d13 2024-11-20T17:22:27,203 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/521dbe6723004b4ebccdac27bef5fce6 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/521dbe6723004b4ebccdac27bef5fce6 2024-11-20T17:22:27,204 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/68918d6413ed4175b2f7732441d0b095 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/68918d6413ed4175b2f7732441d0b095 2024-11-20T17:22:27,205 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/72bbcb4b7a514722aa9797bb479111de to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/72bbcb4b7a514722aa9797bb479111de 2024-11-20T17:22:27,206 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/fe1462a7bd7846d78841d61a28e77e67 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/fe1462a7bd7846d78841d61a28e77e67 2024-11-20T17:22:27,206 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/bf9c590c2c1242f99599db0c0e298f18 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/bf9c590c2c1242f99599db0c0e298f18 2024-11-20T17:22:27,207 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/4d65492016a046a290bfda8e938ec0fd to 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/4d65492016a046a290bfda8e938ec0fd 2024-11-20T17:22:27,208 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/3c208e9c3b80422582b7466f5a48f294 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/3c208e9c3b80422582b7466f5a48f294 2024-11-20T17:22:27,209 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/dc0bc211357f48bbafe1eb5438cd363c to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/dc0bc211357f48bbafe1eb5438cd363c 2024-11-20T17:22:27,210 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/8f2de56c102c4634a7978e6e0e0a95ff to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/8f2de56c102c4634a7978e6e0e0a95ff 2024-11-20T17:22:27,211 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/ef107feb57124a8bb5c5e318923dce73 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/ef107feb57124a8bb5c5e318923dce73 2024-11-20T17:22:27,211 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/312ce682b32d404d8371778f07ef08bc to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/312ce682b32d404d8371778f07ef08bc 2024-11-20T17:22:27,212 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/450dd80eaa384b5f961aa8ffc3ff82a0 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/450dd80eaa384b5f961aa8ffc3ff82a0 2024-11-20T17:22:27,213 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/a83e513ecb0f4d36997637f3d9b79dec to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/a83e513ecb0f4d36997637f3d9b79dec 2024-11-20T17:22:27,214 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/bd54be9b9fb844ceb256f2a47e8e23d8 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/bd54be9b9fb844ceb256f2a47e8e23d8 2024-11-20T17:22:27,215 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/26d4626284a84e9b86460ca2c2838201 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/26d4626284a84e9b86460ca2c2838201 2024-11-20T17:22:27,216 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/32b79296f7064815a2bc47b135b262cd to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/32b79296f7064815a2bc47b135b262cd 2024-11-20T17:22:27,216 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/441e88456997489d98e54422c1b2695f to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/441e88456997489d98e54422c1b2695f 2024-11-20T17:22:27,217 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/940ce672d102450ca6899932d486742f to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/940ce672d102450ca6899932d486742f 2024-11-20T17:22:27,218 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/0e5a7cdb48d24856b3a83c2816969e25 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/0e5a7cdb48d24856b3a83c2816969e25 2024-11-20T17:22:27,219 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/c2db3e9153434331a2373a5da93ad8da to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/c2db3e9153434331a2373a5da93ad8da 2024-11-20T17:22:27,220 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/1680a662e84d45afa1626e914f858543, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/17ca120437704d6db0537dc2206259ec, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/1a59e34846874f68a876e51266d1ae98, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/0e0ddf8140ee4827a793853ed1c9dca8, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/c95e944d9edc413f9ca5f40b7dd8d7c9, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/078dfd3501554c488c3d6b87bccb57c2, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/6a962a25dfe34afe9677135434763f3b, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/dbc6718c166442a4ba9ea6bf49a8cfd5, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/f45cdde1f4e14c5b8026426f63aa92aa, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/6c098ea54501433f972c6a4c8ed0d398, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/9178e01eb8de4a339a6459b3a30a5267, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/0e470b5356cc4cbcbf6a86f7ea6da3b8, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/a378cf463b814ac382197a9744634d7f, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/01d0828617f14f819ff9719ca4ea2d9b, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/172518b1eb524160a6108112df3e3a28, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/e52c49ca0c664ef6a354a38f92ed2807, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/654e5108747b44d799104b021e945d3c, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/93d3ccc9248e4e28a14caa9a51469e83, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/97577bba66994e8e9a3770afdd8df2fe, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/7e924788b3184d84867bd603647d0be7, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/d1a0312fa0eb47f4acc6e02ec71bb81e, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/67336f9d6a8d4c4cb6a8a56af4ec7ac2, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/320978641a1c4ae3ad5afcdf4db18250, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/9b9128b44fd84c88891889be9b109f41, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/8c6c66b6a009487b8f33cf9362419008, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/56b320529b2f46709df2afb93360bf20, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/06bd19a17f4f42f897416684f25584e9] to archive 2024-11-20T17:22:27,221 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
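With one archival record emitted per file and several records packed onto each physical line, it can help to tally how many files were archived per column family when reading a capture like this. A hedged log-scanning sketch under the assumption that the record format is exactly as shown above; the regular expression and class name are this example's own, not part of HBase.

import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public final class ArchiveTally {
    // Matches "Archived from FileableStoreFile, <src> to " and captures the family (A, B or C)
    // from the source path; find() is looped because this log packs many records per line.
    private static final Pattern RECORD =
        Pattern.compile("Archived from FileableStoreFile, \\S+/([ABC])/\\S+ to ");

    static Map<String, Integer> tallyByFamily(List<String> logLines) {
        Map<String, Integer> counts = new TreeMap<>();
        for (String line : logLines) {
            Matcher m = RECORD.matcher(line);
            while (m.find()) {
                counts.merge(m.group(1), 1, Integer::sum);
            }
        }
        return counts;
    }

    public static void main(String[] args) {
        List<String> sample = List.of(
            "backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://host/data/default/T/r/A/f1 to hdfs://host/archive/data/default/T/r/A/f1",
            "backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://host/data/default/T/r/B/f2 to hdfs://host/archive/data/default/T/r/B/f2");
        System.out.println(tallyByFamily(sample)); // {A=1, B=1}
    }
}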
2024-11-20T17:22:27,222 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/1680a662e84d45afa1626e914f858543 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/1680a662e84d45afa1626e914f858543 2024-11-20T17:22:27,223 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/17ca120437704d6db0537dc2206259ec to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/17ca120437704d6db0537dc2206259ec 2024-11-20T17:22:27,224 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/1a59e34846874f68a876e51266d1ae98 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/1a59e34846874f68a876e51266d1ae98 2024-11-20T17:22:27,225 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/0e0ddf8140ee4827a793853ed1c9dca8 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/0e0ddf8140ee4827a793853ed1c9dca8 2024-11-20T17:22:27,226 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/c95e944d9edc413f9ca5f40b7dd8d7c9 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/c95e944d9edc413f9ca5f40b7dd8d7c9 2024-11-20T17:22:27,227 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/078dfd3501554c488c3d6b87bccb57c2 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/078dfd3501554c488c3d6b87bccb57c2 2024-11-20T17:22:27,228 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/6a962a25dfe34afe9677135434763f3b to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/6a962a25dfe34afe9677135434763f3b 2024-11-20T17:22:27,229 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/dbc6718c166442a4ba9ea6bf49a8cfd5 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/dbc6718c166442a4ba9ea6bf49a8cfd5 2024-11-20T17:22:27,229 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/f45cdde1f4e14c5b8026426f63aa92aa to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/f45cdde1f4e14c5b8026426f63aa92aa 2024-11-20T17:22:27,230 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/6c098ea54501433f972c6a4c8ed0d398 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/6c098ea54501433f972c6a4c8ed0d398 2024-11-20T17:22:27,231 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/9178e01eb8de4a339a6459b3a30a5267 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/9178e01eb8de4a339a6459b3a30a5267 2024-11-20T17:22:27,232 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/0e470b5356cc4cbcbf6a86f7ea6da3b8 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/0e470b5356cc4cbcbf6a86f7ea6da3b8 2024-11-20T17:22:27,233 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/a378cf463b814ac382197a9744634d7f to 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/a378cf463b814ac382197a9744634d7f 2024-11-20T17:22:27,234 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/01d0828617f14f819ff9719ca4ea2d9b to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/01d0828617f14f819ff9719ca4ea2d9b 2024-11-20T17:22:27,235 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/172518b1eb524160a6108112df3e3a28 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/172518b1eb524160a6108112df3e3a28 2024-11-20T17:22:27,236 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/e52c49ca0c664ef6a354a38f92ed2807 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/e52c49ca0c664ef6a354a38f92ed2807 2024-11-20T17:22:27,237 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/654e5108747b44d799104b021e945d3c to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/654e5108747b44d799104b021e945d3c 2024-11-20T17:22:27,238 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/93d3ccc9248e4e28a14caa9a51469e83 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/93d3ccc9248e4e28a14caa9a51469e83 2024-11-20T17:22:27,239 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/97577bba66994e8e9a3770afdd8df2fe to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/97577bba66994e8e9a3770afdd8df2fe 2024-11-20T17:22:27,240 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/7e924788b3184d84867bd603647d0be7 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/7e924788b3184d84867bd603647d0be7 2024-11-20T17:22:27,241 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/d1a0312fa0eb47f4acc6e02ec71bb81e to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/d1a0312fa0eb47f4acc6e02ec71bb81e 2024-11-20T17:22:27,241 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/67336f9d6a8d4c4cb6a8a56af4ec7ac2 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/67336f9d6a8d4c4cb6a8a56af4ec7ac2 2024-11-20T17:22:27,242 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/320978641a1c4ae3ad5afcdf4db18250 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/320978641a1c4ae3ad5afcdf4db18250 2024-11-20T17:22:27,243 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/9b9128b44fd84c88891889be9b109f41 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/9b9128b44fd84c88891889be9b109f41 2024-11-20T17:22:27,244 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/8c6c66b6a009487b8f33cf9362419008 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/8c6c66b6a009487b8f33cf9362419008 2024-11-20T17:22:27,245 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/56b320529b2f46709df2afb93360bf20 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/56b320529b2f46709df2afb93360bf20 2024-11-20T17:22:27,246 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/06bd19a17f4f42f897416684f25584e9 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/06bd19a17f4f42f897416684f25584e9 2024-11-20T17:22:27,247 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/dbb7158ba5e7465f98864423268575a4, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/02da981306b6494c8eac8de2237c7ec6, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/d9dde800cae241d1bf417946e32a371d, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/00b187fd0f0841858f0ddcc5e3d183d5, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/0b3b519a7e0d467d810a7e6f2447be13, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/09b8596fa6df41efb980ef3825410d51, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/cd53a8c2ca5041f29ed0394ea6e34a38, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/f84d4033a21e4567bdebf6bdf126c9b4, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/a22f6239b9f44684ab33c7af36a9af19, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/68e6534acae54d3bb22b0d688bb94c55, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/0ea4979d826044129672016494100156, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/bfce8245240a46759e4a1757b339a406, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/d306e6065bf847d79494c69b6146ac69, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/e6e877b0f3574ac3bd3defe0ad0632cd, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/c1b7da8922534af9a96962b5c5550afd, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/ac6a510efe1248379973afcd0271aa6f, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/15ee6bfdb8e2472ebdd9e124482bbc9a, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/8e66b5ad91db425aa5665c35f20455af, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/d7128cd0019148cfb4a593a31ad5e795, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/4c791a6687d14f45b3432356dcb220b9, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/b2a5e999708c4883863b71c00881491e, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/94799cdcd6894d738694b4866452f6fd, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/ceadff24824047829b53e3e192fe71cf, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/fbead36e3539434fba6ccb6661e74475, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/6f557ab89b0c43d7a475935d50f43471, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/05658532bc6d43f9836e9bedf291740a, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/10f1112a406c4f16813eff44785811ee] to archive 2024-11-20T17:22:27,248 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
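Each "HStore(2316): Moving the files [...] to archive" record announces the full list that the following HFileArchiver records are expected to cover, so a capture like this can be checked for completeness by comparing the two. A rough sketch of that cross-check, assuming the whole log is available as a single string; the class name and helper are invented for this example.

import java.util.HashSet;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public final class ArchiveCompleteness {
    private static final Pattern ANNOUNCED =
        Pattern.compile("Moving the files \\[([^\\]]+)\\] to archive");
    private static final Pattern ARCHIVED =
        Pattern.compile("Archived from FileableStoreFile, (\\S+) to \\S+");

    // Returns the announced store files that never appear in an "Archived ... to ..." record.
    static Set<String> missing(String log) {
        Set<String> announced = new HashSet<>();
        Matcher a = ANNOUNCED.matcher(log);
        while (a.find()) {
            for (String path : a.group(1).split(",\\s*")) {
                announced.add(path.trim());
            }
        }
        Matcher b = ARCHIVED.matcher(log);
        while (b.find()) {
            announced.remove(b.group(1));
        }
        return announced;
    }

    public static void main(String[] args) {
        String log = "HStore(2316): Moving the files [hdfs://host/data/default/T/r/C/f1, hdfs://host/data/default/T/r/C/f2] to archive "
            + "HFileArchiver(596): Archived from FileableStoreFile, hdfs://host/data/default/T/r/C/f1 to hdfs://host/archive/data/default/T/r/C/f1";
        System.out.println(missing(log)); // [hdfs://host/data/default/T/r/C/f2]
    }
}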
2024-11-20T17:22:27,249 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/dbb7158ba5e7465f98864423268575a4 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/dbb7158ba5e7465f98864423268575a4 2024-11-20T17:22:27,250 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/02da981306b6494c8eac8de2237c7ec6 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/02da981306b6494c8eac8de2237c7ec6 2024-11-20T17:22:27,250 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/d9dde800cae241d1bf417946e32a371d to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/d9dde800cae241d1bf417946e32a371d 2024-11-20T17:22:27,251 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/00b187fd0f0841858f0ddcc5e3d183d5 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/00b187fd0f0841858f0ddcc5e3d183d5 2024-11-20T17:22:27,252 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/0b3b519a7e0d467d810a7e6f2447be13 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/0b3b519a7e0d467d810a7e6f2447be13 2024-11-20T17:22:27,253 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/09b8596fa6df41efb980ef3825410d51 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/09b8596fa6df41efb980ef3825410d51 2024-11-20T17:22:27,254 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/cd53a8c2ca5041f29ed0394ea6e34a38 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/cd53a8c2ca5041f29ed0394ea6e34a38 2024-11-20T17:22:27,255 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/f84d4033a21e4567bdebf6bdf126c9b4 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/f84d4033a21e4567bdebf6bdf126c9b4 2024-11-20T17:22:27,256 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/a22f6239b9f44684ab33c7af36a9af19 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/a22f6239b9f44684ab33c7af36a9af19 2024-11-20T17:22:27,257 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/68e6534acae54d3bb22b0d688bb94c55 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/68e6534acae54d3bb22b0d688bb94c55 2024-11-20T17:22:27,257 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/0ea4979d826044129672016494100156 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/0ea4979d826044129672016494100156 2024-11-20T17:22:27,258 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/bfce8245240a46759e4a1757b339a406 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/bfce8245240a46759e4a1757b339a406 2024-11-20T17:22:27,259 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/d306e6065bf847d79494c69b6146ac69 to 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/d306e6065bf847d79494c69b6146ac69 2024-11-20T17:22:27,260 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/e6e877b0f3574ac3bd3defe0ad0632cd to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/e6e877b0f3574ac3bd3defe0ad0632cd 2024-11-20T17:22:27,261 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/c1b7da8922534af9a96962b5c5550afd to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/c1b7da8922534af9a96962b5c5550afd 2024-11-20T17:22:27,262 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/ac6a510efe1248379973afcd0271aa6f to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/ac6a510efe1248379973afcd0271aa6f 2024-11-20T17:22:27,263 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/15ee6bfdb8e2472ebdd9e124482bbc9a to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/15ee6bfdb8e2472ebdd9e124482bbc9a 2024-11-20T17:22:27,263 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/8e66b5ad91db425aa5665c35f20455af to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/8e66b5ad91db425aa5665c35f20455af 2024-11-20T17:22:27,264 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/d7128cd0019148cfb4a593a31ad5e795 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/d7128cd0019148cfb4a593a31ad5e795 2024-11-20T17:22:27,265 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/4c791a6687d14f45b3432356dcb220b9 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/4c791a6687d14f45b3432356dcb220b9 2024-11-20T17:22:27,266 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/b2a5e999708c4883863b71c00881491e to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/b2a5e999708c4883863b71c00881491e 2024-11-20T17:22:27,267 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/94799cdcd6894d738694b4866452f6fd to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/94799cdcd6894d738694b4866452f6fd 2024-11-20T17:22:27,268 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/ceadff24824047829b53e3e192fe71cf to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/ceadff24824047829b53e3e192fe71cf 2024-11-20T17:22:27,269 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/fbead36e3539434fba6ccb6661e74475 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/fbead36e3539434fba6ccb6661e74475 2024-11-20T17:22:27,270 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/6f557ab89b0c43d7a475935d50f43471 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/6f557ab89b0c43d7a475935d50f43471 2024-11-20T17:22:27,271 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/05658532bc6d43f9836e9bedf291740a to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/05658532bc6d43f9836e9bedf291740a 2024-11-20T17:22:27,271 DEBUG [StoreCloser-TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/10f1112a406c4f16813eff44785811ee to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/10f1112a406c4f16813eff44785811ee 2024-11-20T17:22:27,275 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/recovered.edits/445.seqid, newMaxSeqId=445, maxSeqId=1 2024-11-20T17:22:27,276 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef. 2024-11-20T17:22:27,276 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1635): Region close journal for 3030edb9ba565d9149afc5328873a8ef: 2024-11-20T17:22:27,277 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(170): Closed 3030edb9ba565d9149afc5328873a8ef 2024-11-20T17:22:27,277 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=93 updating hbase:meta row=3030edb9ba565d9149afc5328873a8ef, regionState=CLOSED 2024-11-20T17:22:27,279 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93 2024-11-20T17:22:27,279 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; CloseRegionProcedure 3030edb9ba565d9149afc5328873a8ef, server=d514dc944523,40121,1732123262111 in 1.9420 sec 2024-11-20T17:22:27,280 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=93, resume processing ppid=92 2024-11-20T17:22:27,280 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, ppid=92, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=3030edb9ba565d9149afc5328873a8ef, UNASSIGN in 1.9450 sec 2024-11-20T17:22:27,282 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-11-20T17:22:27,282 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.9480 sec 2024-11-20T17:22:27,283 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123347282"}]},"ts":"1732123347282"} 2024-11-20T17:22:27,283 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T17:22:27,285 INFO 
[PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T17:22:27,287 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.9590 sec 2024-11-20T17:22:27,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-20T17:22:27,433 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 91 completed 2024-11-20T17:22:27,433 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T17:22:27,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:22:27,435 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:22:27,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-11-20T17:22:27,435 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=95, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:22:27,438 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef 2024-11-20T17:22:27,439 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A, FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B, FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C, FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/recovered.edits] 2024-11-20T17:22:27,442 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/23a146b0e9344971a42e6e66032c910d to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/23a146b0e9344971a42e6e66032c910d 2024-11-20T17:22:27,443 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/621160b44d084a668d2e4620173b9c77 to 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/621160b44d084a668d2e4620173b9c77 2024-11-20T17:22:27,444 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/9d9215127bfb4657b3acaf3071284cbc to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/A/9d9215127bfb4657b3acaf3071284cbc 2024-11-20T17:22:27,445 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/2a944bb26cef4b25a3131b3dee07846d to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/2a944bb26cef4b25a3131b3dee07846d 2024-11-20T17:22:27,446 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/7fdbae986270445cabb4ef8fbf15fd8c to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/7fdbae986270445cabb4ef8fbf15fd8c 2024-11-20T17:22:27,447 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/e68cd8e5b5484997a6b0c8a94da19ea0 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/B/e68cd8e5b5484997a6b0c8a94da19ea0 2024-11-20T17:22:27,449 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/bb7a3c78bd05464ba7ef222243802037 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/bb7a3c78bd05464ba7ef222243802037 2024-11-20T17:22:27,450 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/c8e208cd5df64ad6ac55ac799d70460c to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/c8e208cd5df64ad6ac55ac799d70460c 2024-11-20T17:22:27,451 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/cf29cf175df1485391358796b9f68cfc to 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/C/cf29cf175df1485391358796b9f68cfc 2024-11-20T17:22:27,453 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/recovered.edits/445.seqid to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef/recovered.edits/445.seqid 2024-11-20T17:22:27,453 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/3030edb9ba565d9149afc5328873a8ef 2024-11-20T17:22:27,453 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T17:22:27,455 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=95, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:22:27,459 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T17:22:27,461 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T17:22:27,462 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=95, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:22:27,462 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T17:22:27,462 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732123347462"}]},"ts":"9223372036854775807"} 2024-11-20T17:22:27,463 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T17:22:27,463 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 3030edb9ba565d9149afc5328873a8ef, NAME => 'TestAcidGuarantees,,1732123321244.3030edb9ba565d9149afc5328873a8ef.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T17:22:27,464 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
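The records above trace the server side of the test's cleanup: DisableTableProcedure (pid=91) followed by DeleteTableProcedure (pid=95), which archives the region directories, removes the region rows from hbase:meta, and drops the table descriptor. A minimal client-side sketch of the Admin calls that drive those procedures, assuming a standard HBase 2.x client on the classpath; the connection setup is illustrative, not the test's actual code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();      // picks up hbase-site.xml / test configuration
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      if (admin.tableExists(table)) {
        if (admin.isTableEnabled(table)) {
          admin.disableTable(table);  // server side: DisableTableProcedure (pid=91 above)
        }
        admin.deleteTable(table);     // server side: DeleteTableProcedure (pid=95): archive regions, then remove from hbase:meta
      }
    }
  }
}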
2024-11-20T17:22:27,464 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732123347464"}]},"ts":"9223372036854775807"} 2024-11-20T17:22:27,465 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T17:22:27,467 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=95, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:22:27,468 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 33 msec 2024-11-20T17:22:27,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-11-20T17:22:27,536 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 95 completed 2024-11-20T17:22:27,546 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testGetAtomicity Thread=240 (was 242), OpenFileDescriptor=452 (was 464), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=326 (was 314) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6173 (was 6224) 2024-11-20T17:22:27,554 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobScanAtomicity Thread=240, OpenFileDescriptor=452, MaxFileDescriptor=1048576, SystemLoadAverage=326, ProcessCount=11, AvailableMemoryMB=6173 2024-11-20T17:22:27,555 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
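TableDescriptorChecker warns because the test runs with hbase.hregion.memstore.flush.size at 131072 bytes (128 KB), far below the production default, so memstores flush very frequently. A short sketch of the two places such a value can come from, assuming the 2.x client API; whether TestAcidGuarantees sets it through the Configuration or the table descriptor is not visible in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class SmallFlushSizeSketch {
  public static void main(String[] args) {
    // Cluster-wide default (hbase-site.xml / test Configuration):
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // 131072 bytes, the value flagged above

    // Or per table, which is what TableDescriptorChecker inspects:
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setMemStoreFlushSize(128 * 1024)
        .build();
    System.out.println(desc.getMemStoreFlushSize()); // 131072
  }
}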
2024-11-20T17:22:27,555 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T17:22:27,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T17:22:27,557 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T17:22:27,557 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:22:27,557 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 96 2024-11-20T17:22:27,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-11-20T17:22:27,557 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T17:22:27,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742157_1333 (size=960) 2024-11-20T17:22:27,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-11-20T17:22:27,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-11-20T17:22:27,964 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0 2024-11-20T17:22:27,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742158_1334 (size=53) 2024-11-20T17:22:28,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-11-20T17:22:28,369 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:22:28,370 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing ac48603af62441d2defd4d588b3226cb, disabling compactions & flushes 2024-11-20T17:22:28,370 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:28,370 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:28,370 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. after waiting 0 ms 2024-11-20T17:22:28,370 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:28,370 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 
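The CreateTableProcedure (pid=96) above writes a schema with three column families A, B, and C, each keeping a single version with 64 KB blocks, plus the table-level 'hbase.hregion.compacting.memstore.type' => 'BASIC' attribute. A roughly equivalent client-side create call, sketched with the standard 2.x API; it is illustrative only, not the test's code.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder builder = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestAcidGuarantees"))
          // table-level metadata seen in the create record above
          .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
      for (String family : new String[] {"A", "B", "C"}) {
        builder.setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes(family))
            .setMaxVersions(1)        // VERSIONS => '1'
            .setBlocksize(64 * 1024)  // BLOCKSIZE => 65536
            .build());
      }
      admin.createTable(builder.build()); // drives the CREATE_TABLE_* states of pid=96 above
    }
  }
}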
2024-11-20T17:22:28,370 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for ac48603af62441d2defd4d588b3226cb: 2024-11-20T17:22:28,371 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T17:22:28,371 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732123348371"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732123348371"}]},"ts":"1732123348371"} 2024-11-20T17:22:28,372 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T17:22:28,372 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T17:22:28,373 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123348373"}]},"ts":"1732123348373"} 2024-11-20T17:22:28,373 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T17:22:28,377 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ac48603af62441d2defd4d588b3226cb, ASSIGN}] 2024-11-20T17:22:28,378 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ac48603af62441d2defd4d588b3226cb, ASSIGN 2024-11-20T17:22:28,378 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=ac48603af62441d2defd4d588b3226cb, ASSIGN; state=OFFLINE, location=d514dc944523,40121,1732123262111; forceNewPlan=false, retain=false 2024-11-20T17:22:28,529 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=97 updating hbase:meta row=ac48603af62441d2defd4d588b3226cb, regionState=OPENING, regionLocation=d514dc944523,40121,1732123262111 2024-11-20T17:22:28,530 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=98, ppid=97, state=RUNNABLE; OpenRegionProcedure ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111}] 2024-11-20T17:22:28,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-11-20T17:22:28,682 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:28,684 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 
2024-11-20T17:22:28,684 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7285): Opening region: {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} 2024-11-20T17:22:28,685 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:28,685 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:22:28,685 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7327): checking encryption for ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:28,685 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7330): checking classloading for ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:28,686 INFO [StoreOpener-ac48603af62441d2defd4d588b3226cb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:28,687 INFO [StoreOpener-ac48603af62441d2defd4d588b3226cb-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:22:28,687 INFO [StoreOpener-ac48603af62441d2defd4d588b3226cb-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ac48603af62441d2defd4d588b3226cb columnFamilyName A 2024-11-20T17:22:28,687 DEBUG [StoreOpener-ac48603af62441d2defd4d588b3226cb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:22:28,688 INFO [StoreOpener-ac48603af62441d2defd4d588b3226cb-1 {}] regionserver.HStore(327): Store=ac48603af62441d2defd4d588b3226cb/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:22:28,688 INFO [StoreOpener-ac48603af62441d2defd4d588b3226cb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:28,689 INFO [StoreOpener-ac48603af62441d2defd4d588b3226cb-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:22:28,689 INFO [StoreOpener-ac48603af62441d2defd4d588b3226cb-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ac48603af62441d2defd4d588b3226cb columnFamilyName B 2024-11-20T17:22:28,689 DEBUG [StoreOpener-ac48603af62441d2defd4d588b3226cb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:22:28,689 INFO [StoreOpener-ac48603af62441d2defd4d588b3226cb-1 {}] regionserver.HStore(327): Store=ac48603af62441d2defd4d588b3226cb/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:22:28,689 INFO [StoreOpener-ac48603af62441d2defd4d588b3226cb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:28,690 INFO [StoreOpener-ac48603af62441d2defd4d588b3226cb-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:22:28,690 INFO [StoreOpener-ac48603af62441d2defd4d588b3226cb-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ac48603af62441d2defd4d588b3226cb columnFamilyName C 2024-11-20T17:22:28,690 DEBUG [StoreOpener-ac48603af62441d2defd4d588b3226cb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:22:28,690 INFO [StoreOpener-ac48603af62441d2defd4d588b3226cb-1 {}] regionserver.HStore(327): Store=ac48603af62441d2defd4d588b3226cb/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:22:28,691 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:28,691 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:28,691 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:28,693 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T17:22:28,693 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1085): writing seq id for ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:28,695 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T17:22:28,695 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1102): Opened ac48603af62441d2defd4d588b3226cb; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59352030, jitterRate=-0.11558583378791809}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T17:22:28,696 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1001): Region open journal for ac48603af62441d2defd4d588b3226cb: 2024-11-20T17:22:28,697 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb., pid=98, masterSystemTime=1732123348681 2024-11-20T17:22:28,698 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:28,698 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 
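Because of the BASIC compacting-memstore attribute, each store opens with a CompactingMemStore (in-memory flush threshold 2.00 MB, compactor=BASIC) rather than a DefaultMemStore, as the StoreOpener records above show. The same policy can also be requested per column family; a small sketch of that alternative using the 2.x client API, noting that the test itself relies on the table-level attribute.

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class InMemoryCompactionSketch {
  public static void main(String[] args) {
    // Per-family equivalent of the table-level
    // 'hbase.hregion.compacting.memstore.type' => 'BASIC' attribute seen in the log:
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("A"))
        .setInMemoryCompaction(MemoryCompactionPolicy.BASIC) // -> CompactingMemStore, compactor=BASIC
        .build();
    System.out.println(cf.getInMemoryCompaction());          // BASIC
  }
}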
2024-11-20T17:22:28,698 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=97 updating hbase:meta row=ac48603af62441d2defd4d588b3226cb, regionState=OPEN, openSeqNum=2, regionLocation=d514dc944523,40121,1732123262111 2024-11-20T17:22:28,700 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=98, resume processing ppid=97 2024-11-20T17:22:28,700 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, ppid=97, state=SUCCESS; OpenRegionProcedure ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 in 169 msec 2024-11-20T17:22:28,701 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=97, resume processing ppid=96 2024-11-20T17:22:28,701 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, ppid=96, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=ac48603af62441d2defd4d588b3226cb, ASSIGN in 323 msec 2024-11-20T17:22:28,702 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T17:22:28,702 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123348702"}]},"ts":"1732123348702"} 2024-11-20T17:22:28,703 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T17:22:28,705 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T17:22:28,706 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1500 sec 2024-11-20T17:22:29,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-11-20T17:22:29,661 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 96 completed 2024-11-20T17:22:29,662 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1b82ba2a to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3637e4c6 2024-11-20T17:22:29,666 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51f7d511, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:22:29,667 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:22:29,668 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32924, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:22:29,669 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T17:22:29,670 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55360, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T17:22:29,671 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-20T17:22:29,671 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T17:22:29,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=99, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-20T17:22:29,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742159_1335 (size=996) 2024-11-20T17:22:30,082 DEBUG [PEWorker-2 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-20T17:22:30,082 INFO [PEWorker-2 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-20T17:22:30,084 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T17:22:30,085 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=101, ppid=100, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ac48603af62441d2defd4d588b3226cb, REOPEN/MOVE}] 2024-11-20T17:22:30,086 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=101, ppid=100, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ac48603af62441d2defd4d588b3226cb, REOPEN/MOVE 2024-11-20T17:22:30,086 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=ac48603af62441d2defd4d588b3226cb, regionState=CLOSING, regionLocation=d514dc944523,40121,1732123262111 2024-11-20T17:22:30,087 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T17:22:30,087 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE; CloseRegionProcedure ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111}] 2024-11-20T17:22:30,238 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:30,239 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] handler.UnassignRegionHandler(124): Close ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:30,239 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T17:22:30,239 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1681): Closing ac48603af62441d2defd4d588b3226cb, disabling compactions & flushes 2024-11-20T17:22:30,239 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:30,239 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:30,239 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. after waiting 0 ms 2024-11-20T17:22:30,239 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 
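The modify request logged at 17:22:29,671 switches family A to a MOB family (IS_MOB => 'true', MOB_THRESHOLD => '4') for testMobScanAtomicity, and the resulting ModifyTableProcedure (pid=99) reopens the region through pids 100-103, the close/reopen sequence in the surrounding records. A hedged client-side sketch of such a modification with the 2.x Admin API; it is illustrative, not the test's code.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptor current = admin.getDescriptor(table);
      TableDescriptor updated = TableDescriptorBuilder.newBuilder(current)
          .modifyColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
              .setMobEnabled(true)  // IS_MOB => 'true'
              .setMobThreshold(4L)  // MOB_THRESHOLD => '4' (bytes)
              .build())
          .build();
      admin.modifyTable(updated);   // ModifyTableProcedure (pid=99) + region reopen (pids 100-103)
    }
  }
}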
2024-11-20T17:22:30,243 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-20T17:22:30,243 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:30,243 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1635): Region close journal for ac48603af62441d2defd4d588b3226cb: 2024-11-20T17:22:30,243 WARN [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegionServer(3786): Not adding moved region record: ac48603af62441d2defd4d588b3226cb to self. 2024-11-20T17:22:30,244 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] handler.UnassignRegionHandler(170): Closed ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:30,245 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=ac48603af62441d2defd4d588b3226cb, regionState=CLOSED 2024-11-20T17:22:30,246 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=102, resume processing ppid=101 2024-11-20T17:22:30,246 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, ppid=101, state=SUCCESS; CloseRegionProcedure ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 in 158 msec 2024-11-20T17:22:30,247 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=101, ppid=100, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=ac48603af62441d2defd4d588b3226cb, REOPEN/MOVE; state=CLOSED, location=d514dc944523,40121,1732123262111; forceNewPlan=false, retain=true 2024-11-20T17:22:30,397 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=ac48603af62441d2defd4d588b3226cb, regionState=OPENING, regionLocation=d514dc944523,40121,1732123262111 2024-11-20T17:22:30,398 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=101, state=RUNNABLE; OpenRegionProcedure ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111}] 2024-11-20T17:22:30,490 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T17:22:30,550 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:30,552 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 
2024-11-20T17:22:30,552 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7285): Opening region: {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} 2024-11-20T17:22:30,553 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:30,553 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:22:30,553 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7327): checking encryption for ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:30,553 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7330): checking classloading for ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:30,554 INFO [StoreOpener-ac48603af62441d2defd4d588b3226cb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:30,555 INFO [StoreOpener-ac48603af62441d2defd4d588b3226cb-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:22:30,555 INFO [StoreOpener-ac48603af62441d2defd4d588b3226cb-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ac48603af62441d2defd4d588b3226cb columnFamilyName A 2024-11-20T17:22:30,556 DEBUG [StoreOpener-ac48603af62441d2defd4d588b3226cb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:22:30,556 INFO [StoreOpener-ac48603af62441d2defd4d588b3226cb-1 {}] regionserver.HStore(327): Store=ac48603af62441d2defd4d588b3226cb/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:22:30,557 INFO [StoreOpener-ac48603af62441d2defd4d588b3226cb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:30,557 INFO [StoreOpener-ac48603af62441d2defd4d588b3226cb-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:22:30,557 INFO [StoreOpener-ac48603af62441d2defd4d588b3226cb-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ac48603af62441d2defd4d588b3226cb columnFamilyName B 2024-11-20T17:22:30,557 DEBUG [StoreOpener-ac48603af62441d2defd4d588b3226cb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:22:30,558 INFO [StoreOpener-ac48603af62441d2defd4d588b3226cb-1 {}] regionserver.HStore(327): Store=ac48603af62441d2defd4d588b3226cb/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:22:30,558 INFO [StoreOpener-ac48603af62441d2defd4d588b3226cb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:30,558 INFO [StoreOpener-ac48603af62441d2defd4d588b3226cb-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:22:30,558 INFO [StoreOpener-ac48603af62441d2defd4d588b3226cb-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ac48603af62441d2defd4d588b3226cb columnFamilyName C 2024-11-20T17:22:30,558 DEBUG [StoreOpener-ac48603af62441d2defd4d588b3226cb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:22:30,558 INFO [StoreOpener-ac48603af62441d2defd4d588b3226cb-1 {}] regionserver.HStore(327): Store=ac48603af62441d2defd4d588b3226cb/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:22:30,559 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:30,559 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:30,560 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:30,561 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T17:22:30,562 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1085): writing seq id for ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:30,563 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1102): Opened ac48603af62441d2defd4d588b3226cb; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71027823, jitterRate=0.05839703977108002}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T17:22:30,563 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1001): Region open journal for ac48603af62441d2defd4d588b3226cb: 2024-11-20T17:22:30,564 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb., pid=103, masterSystemTime=1732123350549 2024-11-20T17:22:30,565 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:30,565 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 
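Each of the three store-open records above (families A, B and C) reports memstore type=CompactingMemStore with compactor=BASIC, which is what the region server uses when a column family enables in-memory compaction. Below is a minimal sketch of a table descriptor that would produce that store configuration, built only with public client builders; the cache-config flags and compaction tunables printed in the log are defaults and are deliberately not set here.

```java
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class InMemoryCompactionDescriptor {
  public static void main(String[] args) {
    TableDescriptorBuilder tdb =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
    // One family per store seen above, each asking for the BASIC in-memory
    // compaction policy that backs CompactingMemStore.
    for (String family : new String[] {"A", "B", "C"}) {
      ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes(family))
          .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
          .build();
      tdb.setColumnFamily(cfd);
    }
    TableDescriptor td = tdb.build();
    System.out.println(td);
  }
}
```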
2024-11-20T17:22:30,565 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=ac48603af62441d2defd4d588b3226cb, regionState=OPEN, openSeqNum=5, regionLocation=d514dc944523,40121,1732123262111 2024-11-20T17:22:30,567 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=101 2024-11-20T17:22:30,567 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=101, state=SUCCESS; OpenRegionProcedure ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 in 168 msec 2024-11-20T17:22:30,568 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=101, resume processing ppid=100 2024-11-20T17:22:30,568 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=100, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=ac48603af62441d2defd4d588b3226cb, REOPEN/MOVE in 482 msec 2024-11-20T17:22:30,569 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=100, resume processing ppid=99 2024-11-20T17:22:30,569 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, ppid=99, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 484 msec 2024-11-20T17:22:30,570 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 898 msec 2024-11-20T17:22:30,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-11-20T17:22:30,572 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7b6cf8cb to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72f422b4 2024-11-20T17:22:30,579 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1dc42ea6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:22:30,579 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7ec15031 to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2df33cdf 2024-11-20T17:22:30,582 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@117e86d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:22:30,583 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3dd5b441 to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@9f472e0 2024-11-20T17:22:30,586 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cd96549, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:22:30,587 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c336ea4 to 
127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@167a78b0 2024-11-20T17:22:30,590 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31aea41b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:22:30,591 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f94d721 to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5aee939b 2024-11-20T17:22:30,594 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e247aa1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:22:30,595 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f49665c to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2205f666 2024-11-20T17:22:30,597 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27539bdc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:22:30,598 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x683f8469 to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6584e9ce 2024-11-20T17:22:30,601 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e3203d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:22:30,601 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x75e4d3d0 to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@37ec8e3b 2024-11-20T17:22:30,607 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@798e7fd4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:22:30,607 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2b308f62 to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@787e5169 2024-11-20T17:22:30,610 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7284f16d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:22:30,611 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x68035c67 to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@627cad17 2024-11-20T17:22:30,614 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37a637ac, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:22:30,618 DEBUG [hconnection-0x4510a4d1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:22:30,618 DEBUG [hconnection-0x1dc23094-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:22:30,620 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32930, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:22:30,620 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32928, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:22:30,620 DEBUG [hconnection-0x49347c6a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:22:30,621 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32948, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:22:30,622 DEBUG [hconnection-0x6077667c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:22:30,622 DEBUG [hconnection-0x54e61ba6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:22:30,623 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32956, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:22:30,623 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32972, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:22:30,624 DEBUG [hconnection-0x23880b4c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:22:30,624 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32980, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:22:30,627 DEBUG [hconnection-0x27637193-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:22:30,628 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32994, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:22:30,630 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:22:30,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees 2024-11-20T17:22:30,631 DEBUG [hconnection-0x33dcf21f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:22:30,631 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:22:30,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T17:22:30,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:30,631 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ac48603af62441d2defd4d588b3226cb 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T17:22:30,632 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:22:30,632 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:22:30,632 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33002, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:22:30,632 DEBUG [hconnection-0x5b043b6b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:22:30,633 DEBUG [hconnection-0x31ae54af-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:22:30,633 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=A 2024-11-20T17:22:30,633 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:30,633 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33018, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:22:30,633 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=B 2024-11-20T17:22:30,634 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:30,634 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=C 2024-11-20T17:22:30,634 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:30,637 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33020, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:22:30,654 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore 
size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:30,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123410651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:30,654 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:30,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32928 deadline: 1732123410652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:30,655 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:30,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123410653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:30,655 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:30,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123410653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:30,655 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:30,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123410654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:30,666 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120cf23a626ff26442b91fedacce495dca1_ac48603af62441d2defd4d588b3226cb is 50, key is test_row_0/A:col10/1732123350631/Put/seqid=0 2024-11-20T17:22:30,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742160_1336 (size=12154) 2024-11-20T17:22:30,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T17:22:30,757 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:30,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123410756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:30,757 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:30,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123410756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:30,757 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:30,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32928 deadline: 1732123410756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:30,758 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:30,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123410756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:30,762 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:30,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123410757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:30,783 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:30,784 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-20T17:22:30,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:30,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. as already flushing 2024-11-20T17:22:30,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:30,784 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:30,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:30,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:30,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T17:22:30,936 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:30,937 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-20T17:22:30,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:30,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. as already flushing 2024-11-20T17:22:30,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:30,937 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:30,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:30,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:30,960 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:30,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123410958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:30,962 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:30,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123410959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:30,962 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:30,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123410959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:30,963 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:30,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32928 deadline: 1732123410959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:30,967 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:30,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123410964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:31,071 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:22:31,075 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120cf23a626ff26442b91fedacce495dca1_ac48603af62441d2defd4d588b3226cb to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cf23a626ff26442b91fedacce495dca1_ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:31,076 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/82abe5dfe8724ea08dd007a03a595c12, store: [table=TestAcidGuarantees family=A region=ac48603af62441d2defd4d588b3226cb] 2024-11-20T17:22:31,077 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/82abe5dfe8724ea08dd007a03a595c12 is 175, key is test_row_0/A:col10/1732123350631/Put/seqid=0 2024-11-20T17:22:31,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742161_1337 (size=30955) 2024-11-20T17:22:31,081 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=17, memsize=22.4 
K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/82abe5dfe8724ea08dd007a03a595c12 2024-11-20T17:22:31,089 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:31,090 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-20T17:22:31,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:31,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. as already flushing 2024-11-20T17:22:31,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:31,090 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:31,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:31,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:22:31,107 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/15df3c108658418797aaddb8026a46ee is 50, key is test_row_0/B:col10/1732123350631/Put/seqid=0 2024-11-20T17:22:31,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742162_1338 (size=12001) 2024-11-20T17:22:31,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T17:22:31,242 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:31,243 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-20T17:22:31,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:31,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. as already flushing 2024-11-20T17:22:31,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:31,243 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:31,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:31,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:31,264 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:31,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123411262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:31,267 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:31,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32928 deadline: 1732123411265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:31,268 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:31,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123411265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:31,268 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:31,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123411265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:31,271 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:31,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123411270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:31,395 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:31,396 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-20T17:22:31,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:31,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. as already flushing 2024-11-20T17:22:31,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:31,396 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:22:31,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:31,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:31,512 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/15df3c108658418797aaddb8026a46ee 2024-11-20T17:22:31,534 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/2fe70a2029db455ca0c7b614e58ec45a is 50, key is test_row_0/C:col10/1732123350631/Put/seqid=0 2024-11-20T17:22:31,548 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:31,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742163_1339 (size=12001) 2024-11-20T17:22:31,549 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/2fe70a2029db455ca0c7b614e58ec45a 2024-11-20T17:22:31,554 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/82abe5dfe8724ea08dd007a03a595c12 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/82abe5dfe8724ea08dd007a03a595c12 2024-11-20T17:22:31,556 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-20T17:22:31,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:31,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. as already flushing 2024-11-20T17:22:31,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:31,557 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:31,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:31,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:31,558 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/82abe5dfe8724ea08dd007a03a595c12, entries=150, sequenceid=17, filesize=30.2 K 2024-11-20T17:22:31,559 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/15df3c108658418797aaddb8026a46ee as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/15df3c108658418797aaddb8026a46ee 2024-11-20T17:22:31,563 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/15df3c108658418797aaddb8026a46ee, entries=150, sequenceid=17, filesize=11.7 K 2024-11-20T17:22:31,572 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/2fe70a2029db455ca0c7b614e58ec45a as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/2fe70a2029db455ca0c7b614e58ec45a 2024-11-20T17:22:31,576 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/2fe70a2029db455ca0c7b614e58ec45a, entries=150, sequenceid=17, filesize=11.7 K 
2024-11-20T17:22:31,577 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for ac48603af62441d2defd4d588b3226cb in 946ms, sequenceid=17, compaction requested=false 2024-11-20T17:22:31,577 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-20T17:22:31,578 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ac48603af62441d2defd4d588b3226cb: 2024-11-20T17:22:31,709 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:31,710 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-20T17:22:31,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:31,710 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2837): Flushing ac48603af62441d2defd4d588b3226cb 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T17:22:31,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=A 2024-11-20T17:22:31,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:31,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=B 2024-11-20T17:22:31,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:31,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=C 2024-11-20T17:22:31,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:31,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d3bce4b14a7249fb89a1eef84cfb20e1_ac48603af62441d2defd4d588b3226cb is 50, key is test_row_0/A:col10/1732123350644/Put/seqid=0 2024-11-20T17:22:31,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742164_1340 (size=12154) 2024-11-20T17:22:31,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:22:31,725 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d3bce4b14a7249fb89a1eef84cfb20e1_ac48603af62441d2defd4d588b3226cb to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d3bce4b14a7249fb89a1eef84cfb20e1_ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:31,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/e6b04b2379ac48f19a6ec48752f300a4, store: [table=TestAcidGuarantees family=A region=ac48603af62441d2defd4d588b3226cb] 2024-11-20T17:22:31,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/e6b04b2379ac48f19a6ec48752f300a4 is 175, key is test_row_0/A:col10/1732123350644/Put/seqid=0 2024-11-20T17:22:31,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742165_1341 (size=30955) 2024-11-20T17:22:31,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T17:22:31,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:31,771 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. as already flushing 2024-11-20T17:22:31,784 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:31,784 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:31,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123411778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:31,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123411779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:31,787 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:31,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123411783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:31,788 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:31,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32928 deadline: 1732123411784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:31,788 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
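The repeated RegionTooBusyException entries above mean the region has temporarily blocked writes: its memstore has grown past the blocking limit (reported here as 512.0 K) while the flush for pid=105 is still in progress. In a stock deployment that limit is derived from two configuration keys, hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier; the tiny 512 K figure suggests this test deliberately shrinks the flush size to force blocking. A minimal sketch of how the blocking threshold is derived (the values below are the usual defaults, shown for illustration, not the ones this test run uses):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Usual defaults, set explicitly for illustration; the test behind this log
        // evidently uses a far smaller flush size to reach a 512 K blocking limit.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long blockingLimit =
                conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024)
                        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);

        // Once a region's memstore grows past this limit, puts are rejected with
        // RegionTooBusyException until an in-flight flush brings the size back down.
        System.out.println("Blocking limit ~= " + blockingLimit + " bytes");
    }
}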
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:31,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123411784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:31,886 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:31,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123411885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:31,886 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:31,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123411885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:31,890 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:31,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123411888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:31,891 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:31,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32928 deadline: 1732123411889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:31,891 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
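On the client side, each of these rejected Mutate calls eventually surfaces as an exception. A minimal, hypothetical retry loop for a writer hitting this condition is sketched below; whether the RegionTooBusyException arrives unwrapped or inside the client's own retry machinery depends on retry settings, so the catch clause, attempt count, and back-off values are assumptions for illustration, not the behavior of the test harness in this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriterSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            int attempt = 0;
            while (true) {
                try {
                    table.put(put);
                    break;
                } catch (RegionTooBusyException e) {
                    // Assumption: the server-side exception reaches the caller unwrapped.
                    // Back off so the flush in progress can drain the memstore.
                    if (++attempt > 5) {
                        throw e;
                    }
                    Thread.sleep(200L * attempt);
                }
            }
        }
    }
}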
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:31,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123411889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:32,089 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:32,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123412087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:32,089 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:32,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123412088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:32,094 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:32,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32928 deadline: 1732123412092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:32,094 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:32,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123412093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:32,095 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
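The mobdir/.tmp rename near the start of this stretch and the DefaultMobStoreFlusher entries that follow indicate that at least column family A of TestAcidGuarantees is MOB-enabled, so flushed values above the MOB threshold land in separate mob files rather than ordinary store files. A minimal sketch of declaring such a family; the table name "MobDemo" and the 100-byte threshold are illustrative assumptions, not settings taken from this test:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Hypothetical table; the log's table is TestAcidGuarantees with families A/B/C.
            TableDescriptorBuilder table =
                    TableDescriptorBuilder.newBuilder(TableName.valueOf("MobDemo"));
            table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                    .setMobEnabled(true)    // values above the threshold are written to mobdir/... files
                    .setMobThreshold(100L)  // bytes; illustrative value
                    .build());
            admin.createTable(table.build());
        }
    }
}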
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:32,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123412093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:32,131 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=40, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/e6b04b2379ac48f19a6ec48752f300a4 2024-11-20T17:22:32,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/695f9cdee75e4150b7e29118b2e1f93d is 50, key is test_row_0/B:col10/1732123350644/Put/seqid=0 2024-11-20T17:22:32,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742166_1342 (size=12001) 2024-11-20T17:22:32,143 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/695f9cdee75e4150b7e29118b2e1f93d 2024-11-20T17:22:32,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/cd3dd4bab07b4ce69f6470fefc504972 is 50, key is 
test_row_0/C:col10/1732123350644/Put/seqid=0 2024-11-20T17:22:32,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742167_1343 (size=12001) 2024-11-20T17:22:32,394 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:32,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123412390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:32,394 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:32,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123412392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:32,400 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:32,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123412395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:32,400 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:32,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32928 deadline: 1732123412397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:32,400 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:32,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123412397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:32,566 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/cd3dd4bab07b4ce69f6470fefc504972 2024-11-20T17:22:32,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/e6b04b2379ac48f19a6ec48752f300a4 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/e6b04b2379ac48f19a6ec48752f300a4 2024-11-20T17:22:32,574 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/e6b04b2379ac48f19a6ec48752f300a4, entries=150, sequenceid=40, filesize=30.2 K 2024-11-20T17:22:32,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/695f9cdee75e4150b7e29118b2e1f93d as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/695f9cdee75e4150b7e29118b2e1f93d 2024-11-20T17:22:32,579 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/695f9cdee75e4150b7e29118b2e1f93d, entries=150, sequenceid=40, filesize=11.7 K 2024-11-20T17:22:32,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/cd3dd4bab07b4ce69f6470fefc504972 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/cd3dd4bab07b4ce69f6470fefc504972 2024-11-20T17:22:32,583 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/cd3dd4bab07b4ce69f6470fefc504972, entries=150, sequenceid=40, filesize=11.7 K 2024-11-20T17:22:32,584 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for ac48603af62441d2defd4d588b3226cb in 874ms, sequenceid=40, compaction requested=false 2024-11-20T17:22:32,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2538): Flush status journal for ac48603af62441d2defd4d588b3226cb: 2024-11-20T17:22:32,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:32,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-11-20T17:22:32,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=105 2024-11-20T17:22:32,587 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=104 2024-11-20T17:22:32,587 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9540 sec 2024-11-20T17:22:32,588 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees in 1.9580 sec 2024-11-20T17:22:32,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T17:22:32,735 INFO [Thread-1528 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 104 completed 2024-11-20T17:22:32,736 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:22:32,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees 2024-11-20T17:22:32,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-20T17:22:32,738 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=106, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:22:32,738 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:22:32,738 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:22:32,804 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T17:22:32,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-20T17:22:32,890 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:32,890 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-20T17:22:32,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:32,891 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2837): Flushing ac48603af62441d2defd4d588b3226cb 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T17:22:32,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=A 2024-11-20T17:22:32,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:32,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=B 2024-11-20T17:22:32,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:32,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=C 2024-11-20T17:22:32,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:32,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:32,897 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 
as already flushing 2024-11-20T17:22:32,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112071693182144443b9a81160ebb9745923_ac48603af62441d2defd4d588b3226cb is 50, key is test_row_0/A:col10/1732123351776/Put/seqid=0 2024-11-20T17:22:32,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742168_1344 (size=12154) 2024-11-20T17:22:32,952 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:32,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32928 deadline: 1732123412945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:32,952 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:32,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123412945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:32,953 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:32,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123412946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:32,953 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:32,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123412947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:32,956 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:32,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123412948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:33,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-20T17:22:33,058 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:33,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32928 deadline: 1732123413053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:33,059 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:33,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123413053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:33,059 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:33,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123413054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:33,059 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:33,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123413054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:33,061 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:33,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123413057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:33,264 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:33,264 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:33,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123413261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:33,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32928 deadline: 1732123413261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:33,264 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:33,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123413261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:33,265 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:33,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123413261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:33,265 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:33,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123413262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:33,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:22:33,308 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112071693182144443b9a81160ebb9745923_ac48603af62441d2defd4d588b3226cb to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112071693182144443b9a81160ebb9745923_ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:33,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/bc4c58509ae5415cbdba7da84831bb79, store: [table=TestAcidGuarantees family=A region=ac48603af62441d2defd4d588b3226cb] 2024-11-20T17:22:33,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/bc4c58509ae5415cbdba7da84831bb79 is 175, key is test_row_0/A:col10/1732123351776/Put/seqid=0 2024-11-20T17:22:33,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742169_1345 (size=30955) 2024-11-20T17:22:33,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-20T17:22:33,573 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:33,573 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:33,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123413566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:33,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123413565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:33,573 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:33,573 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:33,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32928 deadline: 1732123413567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:33,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123413567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:33,574 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:33,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123413567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:33,723 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=53, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/bc4c58509ae5415cbdba7da84831bb79 2024-11-20T17:22:33,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/d6e75758adcc44fd99e613437b9a0899 is 50, key is test_row_0/B:col10/1732123351776/Put/seqid=0 2024-11-20T17:22:33,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742170_1346 (size=12001) 2024-11-20T17:22:33,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-20T17:22:34,078 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:34,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32928 deadline: 1732123414074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:34,079 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:34,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123414074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:34,080 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:34,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123414076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:34,080 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:34,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123414077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:34,080 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:34,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123414078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:34,136 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/d6e75758adcc44fd99e613437b9a0899 2024-11-20T17:22:34,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/110f72ddcf894bde850c1678ac2e602a is 50, key is test_row_0/C:col10/1732123351776/Put/seqid=0 2024-11-20T17:22:34,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742171_1347 (size=12001) 2024-11-20T17:22:34,548 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/110f72ddcf894bde850c1678ac2e602a 2024-11-20T17:22:34,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/bc4c58509ae5415cbdba7da84831bb79 as 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/bc4c58509ae5415cbdba7da84831bb79 2024-11-20T17:22:34,556 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/bc4c58509ae5415cbdba7da84831bb79, entries=150, sequenceid=53, filesize=30.2 K 2024-11-20T17:22:34,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/d6e75758adcc44fd99e613437b9a0899 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/d6e75758adcc44fd99e613437b9a0899 2024-11-20T17:22:34,560 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/d6e75758adcc44fd99e613437b9a0899, entries=150, sequenceid=53, filesize=11.7 K 2024-11-20T17:22:34,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/110f72ddcf894bde850c1678ac2e602a as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/110f72ddcf894bde850c1678ac2e602a 2024-11-20T17:22:34,564 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/110f72ddcf894bde850c1678ac2e602a, entries=150, sequenceid=53, filesize=11.7 K 2024-11-20T17:22:34,565 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for ac48603af62441d2defd4d588b3226cb in 1674ms, sequenceid=53, compaction requested=true 2024-11-20T17:22:34,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2538): Flush status journal for ac48603af62441d2defd4d588b3226cb: 2024-11-20T17:22:34,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 
2024-11-20T17:22:34,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=107 2024-11-20T17:22:34,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=107 2024-11-20T17:22:34,567 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106 2024-11-20T17:22:34,567 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8280 sec 2024-11-20T17:22:34,568 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees in 1.8310 sec 2024-11-20T17:22:34,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-20T17:22:34,842 INFO [Thread-1528 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 106 completed 2024-11-20T17:22:34,843 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:22:34,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees 2024-11-20T17:22:34,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-20T17:22:34,844 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:22:34,845 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:22:34,845 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:22:34,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-20T17:22:34,997 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:34,997 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-20T17:22:34,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 
2024-11-20T17:22:34,998 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing ac48603af62441d2defd4d588b3226cb 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T17:22:34,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=A 2024-11-20T17:22:34,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:34,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=B 2024-11-20T17:22:34,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:34,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=C 2024-11-20T17:22:34,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:35,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e6acb990893e4f80bb912e8d7b23fa03_ac48603af62441d2defd4d588b3226cb is 50, key is test_row_0/A:col10/1732123352947/Put/seqid=0 2024-11-20T17:22:35,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742172_1348 (size=12154) 2024-11-20T17:22:35,086 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. as already flushing 2024-11-20T17:22:35,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:35,101 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:35,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32928 deadline: 1732123415095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:35,102 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:35,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123415096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:35,107 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:35,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123415097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:35,107 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:35,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123415099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:35,107 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:35,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123415101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:35,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-20T17:22:35,203 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:35,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32928 deadline: 1732123415202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:35,207 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:35,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123415203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:35,212 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:35,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123415208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:35,212 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:35,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123415208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:35,212 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:35,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123415208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:35,406 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:35,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32928 deadline: 1732123415404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:35,411 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:35,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123415408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:35,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:22:35,417 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:35,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123415414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:35,417 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:35,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123415414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:35,417 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:35,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123415414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:35,420 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e6acb990893e4f80bb912e8d7b23fa03_ac48603af62441d2defd4d588b3226cb to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e6acb990893e4f80bb912e8d7b23fa03_ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:35,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/857a7db8647245ab8f1c45333baaf798, store: [table=TestAcidGuarantees family=A region=ac48603af62441d2defd4d588b3226cb] 2024-11-20T17:22:35,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/857a7db8647245ab8f1c45333baaf798 is 175, key is test_row_0/A:col10/1732123352947/Put/seqid=0 2024-11-20T17:22:35,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742173_1349 (size=30955) 2024-11-20T17:22:35,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-20T17:22:35,710 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:35,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32928 deadline: 1732123415707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:35,715 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:35,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123415714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:35,720 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:35,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123415719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:35,721 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:35,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123415719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:35,724 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:35,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123415720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:35,831 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=76, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/857a7db8647245ab8f1c45333baaf798 2024-11-20T17:22:35,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/be3a593f4f514495a57fec3e86b00cad is 50, key is test_row_0/B:col10/1732123352947/Put/seqid=0 2024-11-20T17:22:35,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742174_1350 (size=12001) 2024-11-20T17:22:35,847 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/be3a593f4f514495a57fec3e86b00cad 2024-11-20T17:22:35,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/d9c7942ccf0c46aabcb5adf11c642469 is 50, key is test_row_0/C:col10/1732123352947/Put/seqid=0 2024-11-20T17:22:35,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742175_1351 (size=12001) 2024-11-20T17:22:35,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-20T17:22:36,219 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:36,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32928 deadline: 1732123416215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:36,220 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:36,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123416217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:36,226 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:36,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123416222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:36,229 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:36,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123416225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:36,230 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:36,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123416227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:36,258 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/d9c7942ccf0c46aabcb5adf11c642469 2024-11-20T17:22:36,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/857a7db8647245ab8f1c45333baaf798 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/857a7db8647245ab8f1c45333baaf798 2024-11-20T17:22:36,265 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/857a7db8647245ab8f1c45333baaf798, entries=150, sequenceid=76, filesize=30.2 K 2024-11-20T17:22:36,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/be3a593f4f514495a57fec3e86b00cad as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/be3a593f4f514495a57fec3e86b00cad 2024-11-20T17:22:36,270 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/be3a593f4f514495a57fec3e86b00cad, entries=150, sequenceid=76, filesize=11.7 K 2024-11-20T17:22:36,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/d9c7942ccf0c46aabcb5adf11c642469 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/d9c7942ccf0c46aabcb5adf11c642469 2024-11-20T17:22:36,274 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/d9c7942ccf0c46aabcb5adf11c642469, entries=150, sequenceid=76, filesize=11.7 K 2024-11-20T17:22:36,274 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for ac48603af62441d2defd4d588b3226cb in 1277ms, sequenceid=76, compaction requested=true 2024-11-20T17:22:36,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for ac48603af62441d2defd4d588b3226cb: 2024-11-20T17:22:36,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:36,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-11-20T17:22:36,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-11-20T17:22:36,277 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-11-20T17:22:36,277 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4300 sec 2024-11-20T17:22:36,278 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 1.4340 sec 2024-11-20T17:22:36,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-20T17:22:36,947 INFO [Thread-1528 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed 2024-11-20T17:22:36,949 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:22:36,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees 2024-11-20T17:22:36,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T17:22:36,950 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=110, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:22:36,950 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:22:36,951 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:22:37,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T17:22:37,102 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:37,102 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-20T17:22:37,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:37,103 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2837): Flushing ac48603af62441d2defd4d588b3226cb 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T17:22:37,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=A 2024-11-20T17:22:37,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:37,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=B 2024-11-20T17:22:37,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:37,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=C 2024-11-20T17:22:37,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:37,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120fb3b7f8d2da745b19b168f2217329d84_ac48603af62441d2defd4d588b3226cb is 50, key is test_row_0/A:col10/1732123355096/Put/seqid=0 2024-11-20T17:22:37,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:40823 is added to blk_1073742176_1352 (size=12154) 2024-11-20T17:22:37,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:22:37,118 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120fb3b7f8d2da745b19b168f2217329d84_ac48603af62441d2defd4d588b3226cb to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120fb3b7f8d2da745b19b168f2217329d84_ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:37,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/85b71758c10a4c22b263f14afebc403e, store: [table=TestAcidGuarantees family=A region=ac48603af62441d2defd4d588b3226cb] 2024-11-20T17:22:37,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/85b71758c10a4c22b263f14afebc403e is 175, key is test_row_0/A:col10/1732123355096/Put/seqid=0 2024-11-20T17:22:37,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742177_1353 (size=30955) 2024-11-20T17:22:37,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:37,222 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. as already flushing 2024-11-20T17:22:37,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T17:22:37,256 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:37,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32928 deadline: 1732123417249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:37,260 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:37,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123417253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:37,260 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:37,260 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:37,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123417254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:37,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123417255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:37,261 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:37,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123417256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:37,358 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:37,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32928 deadline: 1732123417357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:37,364 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:37,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123417361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:37,365 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:37,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123417361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:37,365 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:37,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123417362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:37,365 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:37,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123417362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:37,534 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=89, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/85b71758c10a4c22b263f14afebc403e 2024-11-20T17:22:37,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/18060ba758af46689b927b46a865a794 is 50, key is test_row_0/B:col10/1732123355096/Put/seqid=0 2024-11-20T17:22:37,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742178_1354 (size=12001) 2024-11-20T17:22:37,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T17:22:37,561 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:37,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32928 deadline: 1732123417559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:37,567 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:37,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123417565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:37,568 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:37,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123417566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:37,568 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:37,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123417566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:37,571 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:37,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123417567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:37,864 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:37,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32928 deadline: 1732123417862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:37,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:37,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123417868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:37,874 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:37,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123417869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:37,875 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:37,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123417871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:37,877 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:37,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123417872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:37,947 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/18060ba758af46689b927b46a865a794 2024-11-20T17:22:37,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/2d7d054ec857410db4634535c356fabd is 50, key is test_row_0/C:col10/1732123355096/Put/seqid=0 2024-11-20T17:22:37,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742179_1355 (size=12001) 2024-11-20T17:22:38,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T17:22:38,357 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/2d7d054ec857410db4634535c356fabd 2024-11-20T17:22:38,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/85b71758c10a4c22b263f14afebc403e as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/85b71758c10a4c22b263f14afebc403e 2024-11-20T17:22:38,366 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/85b71758c10a4c22b263f14afebc403e, entries=150, sequenceid=89, filesize=30.2 K 2024-11-20T17:22:38,366 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/18060ba758af46689b927b46a865a794 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/18060ba758af46689b927b46a865a794 2024-11-20T17:22:38,369 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:38,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32928 deadline: 1732123418365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:38,370 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/18060ba758af46689b927b46a865a794, entries=150, sequenceid=89, filesize=11.7 K 2024-11-20T17:22:38,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/2d7d054ec857410db4634535c356fabd as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/2d7d054ec857410db4634535c356fabd 2024-11-20T17:22:38,374 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/2d7d054ec857410db4634535c356fabd, entries=150, sequenceid=89, filesize=11.7 K 2024-11-20T17:22:38,375 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for ac48603af62441d2defd4d588b3226cb in 1273ms, sequenceid=89, compaction requested=true 2024-11-20T17:22:38,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2538): Flush status journal for ac48603af62441d2defd4d588b3226cb: 2024-11-20T17:22:38,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:38,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=111 2024-11-20T17:22:38,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=111 2024-11-20T17:22:38,377 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-11-20T17:22:38,377 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4260 sec 2024-11-20T17:22:38,379 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees in 1.4290 sec 2024-11-20T17:22:38,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:38,381 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ac48603af62441d2defd4d588b3226cb 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T17:22:38,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=A 2024-11-20T17:22:38,382 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:38,382 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=B 2024-11-20T17:22:38,382 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:38,382 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=C 2024-11-20T17:22:38,382 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:38,388 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202eb1c11764ff41a1a2c92b8089489518_ac48603af62441d2defd4d588b3226cb is 50, key is test_row_0/A:col10/1732123357254/Put/seqid=0 
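The flush recorded above (FlushRegionProcedure pid=111 under FlushTableProcedure pid=110) was driven from the master; for reference, a minimal client-side sketch of requesting the same kind of table flush through the public HBase Admin API follows. It is illustrative only: the table name is taken from the log, while the configuration source and the wrapper class are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    // Picks up hbase-site.xml from the classpath (assumed to point at the target cluster).
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Asks the master to flush every region of the table. In the log above this
      // surfaces as FlushTableProcedure pid=110 with a FlushRegionProcedure child
      // (pid=111); the repeated "Checking to see if procedure is done pid=110"
      // entries are the caller polling for the procedure's completion.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}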
2024-11-20T17:22:38,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742180_1356 (size=17034) 2024-11-20T17:22:38,392 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:22:38,395 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202eb1c11764ff41a1a2c92b8089489518_ac48603af62441d2defd4d588b3226cb to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202eb1c11764ff41a1a2c92b8089489518_ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:38,396 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/6011360ae98041d1a1c962413ddddcb8, store: [table=TestAcidGuarantees family=A region=ac48603af62441d2defd4d588b3226cb] 2024-11-20T17:22:38,397 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/6011360ae98041d1a1c962413ddddcb8 is 175, key is test_row_0/A:col10/1732123357254/Put/seqid=0 2024-11-20T17:22:38,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742181_1357 (size=48139) 2024-11-20T17:22:38,402 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:38,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123418391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:38,405 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:38,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123418399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:38,409 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:38,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123418403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:38,409 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:38,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123418403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:38,509 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:38,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123418504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:38,510 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:38,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123418506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:38,515 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:38,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123418510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:38,515 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:38,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123418510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:38,715 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:38,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123418711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:38,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:38,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123418712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:38,722 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:38,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123418716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:38,722 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:38,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123418716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:38,801 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=114, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/6011360ae98041d1a1c962413ddddcb8 2024-11-20T17:22:38,808 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/22fed1946dd64c6aa2d2e2d723b055ae is 50, key is test_row_0/B:col10/1732123357254/Put/seqid=0 2024-11-20T17:22:38,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742182_1358 (size=12001) 2024-11-20T17:22:39,021 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:39,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123419018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:39,021 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:39,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123419018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:39,028 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:39,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123419024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:39,028 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:39,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123419025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:39,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T17:22:39,053 INFO [Thread-1528 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-11-20T17:22:39,054 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:22:39,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees 2024-11-20T17:22:39,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-20T17:22:39,056 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:22:39,056 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:22:39,057 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:22:39,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-20T17:22:39,207 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:39,207 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T17:22:39,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 
2024-11-20T17:22:39,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. as already flushing 2024-11-20T17:22:39,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:39,208 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:39,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:22:39,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:22:39,213 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/22fed1946dd64c6aa2d2e2d723b055ae 2024-11-20T17:22:39,220 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/2da920a518d94968994508ff8119b32c is 50, key is test_row_0/C:col10/1732123357254/Put/seqid=0 2024-11-20T17:22:39,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742183_1359 (size=12001) 2024-11-20T17:22:39,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-20T17:22:39,359 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:39,359 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T17:22:39,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:39,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. as already flushing 2024-11-20T17:22:39,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:39,360 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:22:39,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:39,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:39,380 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:39,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32928 deadline: 1732123419376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:39,512 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:39,512 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T17:22:39,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:39,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. as already flushing 2024-11-20T17:22:39,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 
2024-11-20T17:22:39,512 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:39,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:39,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:39,527 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:39,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123419522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:39,527 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:39,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123419523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:39,534 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:39,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123419529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:39,534 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:39,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123419531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:39,625 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/2da920a518d94968994508ff8119b32c 2024-11-20T17:22:39,630 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/6011360ae98041d1a1c962413ddddcb8 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/6011360ae98041d1a1c962413ddddcb8 2024-11-20T17:22:39,634 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/6011360ae98041d1a1c962413ddddcb8, entries=250, sequenceid=114, filesize=47.0 K 2024-11-20T17:22:39,635 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/22fed1946dd64c6aa2d2e2d723b055ae as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/22fed1946dd64c6aa2d2e2d723b055ae 2024-11-20T17:22:39,638 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/22fed1946dd64c6aa2d2e2d723b055ae, entries=150, sequenceid=114, filesize=11.7 K 2024-11-20T17:22:39,639 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/2da920a518d94968994508ff8119b32c as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/2da920a518d94968994508ff8119b32c 2024-11-20T17:22:39,642 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/2da920a518d94968994508ff8119b32c, entries=150, sequenceid=114, filesize=11.7 K 2024-11-20T17:22:39,643 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for ac48603af62441d2defd4d588b3226cb in 1263ms, sequenceid=114, compaction requested=true 2024-11-20T17:22:39,643 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ac48603af62441d2defd4d588b3226cb: 2024-11-20T17:22:39,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ac48603af62441d2defd4d588b3226cb:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:22:39,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:39,643 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-11-20T17:22:39,643 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-11-20T17:22:39,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ac48603af62441d2defd4d588b3226cb:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:22:39,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:39,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ac48603af62441d2defd4d588b3226cb:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:22:39,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:22:39,645 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 202914 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-11-20T17:22:39,645 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 72006 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-11-20T17:22:39,645 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): ac48603af62441d2defd4d588b3226cb/A is initiating minor compaction (all files) 2024-11-20T17:22:39,645 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): ac48603af62441d2defd4d588b3226cb/B is initiating minor compaction (all files) 2024-11-20T17:22:39,645 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ac48603af62441d2defd4d588b3226cb/A in TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 
2024-11-20T17:22:39,645 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ac48603af62441d2defd4d588b3226cb/B in TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:39,645 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/82abe5dfe8724ea08dd007a03a595c12, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/e6b04b2379ac48f19a6ec48752f300a4, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/bc4c58509ae5415cbdba7da84831bb79, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/857a7db8647245ab8f1c45333baaf798, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/85b71758c10a4c22b263f14afebc403e, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/6011360ae98041d1a1c962413ddddcb8] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp, totalSize=198.2 K 2024-11-20T17:22:39,645 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/15df3c108658418797aaddb8026a46ee, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/695f9cdee75e4150b7e29118b2e1f93d, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/d6e75758adcc44fd99e613437b9a0899, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/be3a593f4f514495a57fec3e86b00cad, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/18060ba758af46689b927b46a865a794, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/22fed1946dd64c6aa2d2e2d723b055ae] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp, totalSize=70.3 K 2024-11-20T17:22:39,645 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=10 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 
2024-11-20T17:22:39,645 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. files: [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/82abe5dfe8724ea08dd007a03a595c12, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/e6b04b2379ac48f19a6ec48752f300a4, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/bc4c58509ae5415cbdba7da84831bb79, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/857a7db8647245ab8f1c45333baaf798, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/85b71758c10a4c22b263f14afebc403e, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/6011360ae98041d1a1c962413ddddcb8] 2024-11-20T17:22:39,646 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 82abe5dfe8724ea08dd007a03a595c12, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732123350624 2024-11-20T17:22:39,646 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 15df3c108658418797aaddb8026a46ee, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732123350624 2024-11-20T17:22:39,646 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting e6b04b2379ac48f19a6ec48752f300a4, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732123350644 2024-11-20T17:22:39,646 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 695f9cdee75e4150b7e29118b2e1f93d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732123350644 2024-11-20T17:22:39,646 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting bc4c58509ae5415cbdba7da84831bb79, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732123351776 2024-11-20T17:22:39,646 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting d6e75758adcc44fd99e613437b9a0899, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732123351776 2024-11-20T17:22:39,647 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 857a7db8647245ab8f1c45333baaf798, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732123352944 2024-11-20T17:22:39,647 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting be3a593f4f514495a57fec3e86b00cad, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732123352944 2024-11-20T17:22:39,647 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 85b71758c10a4c22b263f14afebc403e, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732123355092 2024-11-20T17:22:39,647 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 18060ba758af46689b927b46a865a794, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732123355092 2024-11-20T17:22:39,647 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6011360ae98041d1a1c962413ddddcb8, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1732123357240 2024-11-20T17:22:39,648 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 22fed1946dd64c6aa2d2e2d723b055ae, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1732123357254 2024-11-20T17:22:39,656 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=ac48603af62441d2defd4d588b3226cb] 2024-11-20T17:22:39,658 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ac48603af62441d2defd4d588b3226cb#B#compaction#307 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:22:39,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-20T17:22:39,658 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/e3c0b45fc9af43c9b2c685bf23377bd2 is 50, key is test_row_0/B:col10/1732123357254/Put/seqid=0 2024-11-20T17:22:39,664 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:39,664 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411201ad521d9eaac484e8bf0699d9eec4187_ac48603af62441d2defd4d588b3226cb store=[table=TestAcidGuarantees family=A region=ac48603af62441d2defd4d588b3226cb] 2024-11-20T17:22:39,665 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T17:22:39,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 
2024-11-20T17:22:39,665 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2837): Flushing ac48603af62441d2defd4d588b3226cb 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T17:22:39,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=A 2024-11-20T17:22:39,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:39,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=B 2024-11-20T17:22:39,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:39,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=C 2024-11-20T17:22:39,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:39,668 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411201ad521d9eaac484e8bf0699d9eec4187_ac48603af62441d2defd4d588b3226cb, store=[table=TestAcidGuarantees family=A region=ac48603af62441d2defd4d588b3226cb] 2024-11-20T17:22:39,668 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201ad521d9eaac484e8bf0699d9eec4187_ac48603af62441d2defd4d588b3226cb because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=ac48603af62441d2defd4d588b3226cb] 2024-11-20T17:22:39,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742184_1360 (size=12207) 2024-11-20T17:22:39,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411204c9ea397b78b487d8104917d2ff0e50b_ac48603af62441d2defd4d588b3226cb is 50, key is test_row_0/A:col10/1732123358389/Put/seqid=0 2024-11-20T17:22:39,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742185_1361 (size=4469) 2024-11-20T17:22:39,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742186_1362 (size=12154) 2024-11-20T17:22:40,077 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/e3c0b45fc9af43c9b2c685bf23377bd2 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/e3c0b45fc9af43c9b2c685bf23377bd2 2024-11-20T17:22:40,081 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in ac48603af62441d2defd4d588b3226cb/B of ac48603af62441d2defd4d588b3226cb into e3c0b45fc9af43c9b2c685bf23377bd2(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:22:40,081 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ac48603af62441d2defd4d588b3226cb: 2024-11-20T17:22:40,081 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb., storeName=ac48603af62441d2defd4d588b3226cb/B, priority=10, startTime=1732123359643; duration=0sec 2024-11-20T17:22:40,081 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:22:40,082 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ac48603af62441d2defd4d588b3226cb:B 2024-11-20T17:22:40,082 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-11-20T17:22:40,083 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ac48603af62441d2defd4d588b3226cb#A#compaction#308 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:22:40,083 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/6f692dd884c84735ac2f673805b2644f is 175, key is test_row_0/A:col10/1732123357254/Put/seqid=0 2024-11-20T17:22:40,085 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 72006 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-11-20T17:22:40,085 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): ac48603af62441d2defd4d588b3226cb/C is initiating minor compaction (all files) 2024-11-20T17:22:40,085 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ac48603af62441d2defd4d588b3226cb/C in TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 
2024-11-20T17:22:40,085 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/2fe70a2029db455ca0c7b614e58ec45a, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/cd3dd4bab07b4ce69f6470fefc504972, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/110f72ddcf894bde850c1678ac2e602a, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/d9c7942ccf0c46aabcb5adf11c642469, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/2d7d054ec857410db4634535c356fabd, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/2da920a518d94968994508ff8119b32c] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp, totalSize=70.3 K 2024-11-20T17:22:40,085 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 2fe70a2029db455ca0c7b614e58ec45a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732123350624 2024-11-20T17:22:40,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:22:40,086 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting cd3dd4bab07b4ce69f6470fefc504972, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732123350644 2024-11-20T17:22:40,086 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 110f72ddcf894bde850c1678ac2e602a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732123351776 2024-11-20T17:22:40,087 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting d9c7942ccf0c46aabcb5adf11c642469, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732123352944 2024-11-20T17:22:40,088 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 2d7d054ec857410db4634535c356fabd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732123355092 2024-11-20T17:22:40,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742187_1363 (size=31161) 2024-11-20T17:22:40,088 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 2da920a518d94968994508ff8119b32c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1732123357254 2024-11-20T17:22:40,090 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 
{event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411204c9ea397b78b487d8104917d2ff0e50b_ac48603af62441d2defd4d588b3226cb to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411204c9ea397b78b487d8104917d2ff0e50b_ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:40,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/733c30328fb04be18afa2971cba6669c, store: [table=TestAcidGuarantees family=A region=ac48603af62441d2defd4d588b3226cb] 2024-11-20T17:22:40,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/733c30328fb04be18afa2971cba6669c is 175, key is test_row_0/A:col10/1732123358389/Put/seqid=0 2024-11-20T17:22:40,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742188_1364 (size=30955) 2024-11-20T17:22:40,104 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ac48603af62441d2defd4d588b3226cb#C#compaction#310 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:22:40,104 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/f7a2d53336c240cc92f7ac478ca6dba3 is 50, key is test_row_0/C:col10/1732123357254/Put/seqid=0 2024-11-20T17:22:40,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742189_1365 (size=12207) 2024-11-20T17:22:40,116 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/f7a2d53336c240cc92f7ac478ca6dba3 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/f7a2d53336c240cc92f7ac478ca6dba3 2024-11-20T17:22:40,121 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in ac48603af62441d2defd4d588b3226cb/C of ac48603af62441d2defd4d588b3226cb into f7a2d53336c240cc92f7ac478ca6dba3(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:22:40,121 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ac48603af62441d2defd4d588b3226cb: 2024-11-20T17:22:40,121 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb., storeName=ac48603af62441d2defd4d588b3226cb/C, priority=10, startTime=1732123359643; duration=0sec 2024-11-20T17:22:40,121 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:40,121 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ac48603af62441d2defd4d588b3226cb:C 2024-11-20T17:22:40,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-20T17:22:40,493 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/6f692dd884c84735ac2f673805b2644f as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/6f692dd884c84735ac2f673805b2644f 2024-11-20T17:22:40,497 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=126, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/733c30328fb04be18afa2971cba6669c 2024-11-20T17:22:40,497 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in ac48603af62441d2defd4d588b3226cb/A of ac48603af62441d2defd4d588b3226cb into 6f692dd884c84735ac2f673805b2644f(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:22:40,498 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ac48603af62441d2defd4d588b3226cb: 2024-11-20T17:22:40,498 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb., storeName=ac48603af62441d2defd4d588b3226cb/A, priority=10, startTime=1732123359643; duration=0sec 2024-11-20T17:22:40,498 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:40,498 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ac48603af62441d2defd4d588b3226cb:A 2024-11-20T17:22:40,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/f1b2622e8ba141dda6a7ee644fc58644 is 50, key is test_row_0/B:col10/1732123358389/Put/seqid=0 2024-11-20T17:22:40,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742190_1366 (size=12001) 2024-11-20T17:22:40,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:40,534 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. as already flushing 2024-11-20T17:22:40,565 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:40,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123420560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:40,565 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:40,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123420560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:40,566 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:40,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123420563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:40,569 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:40,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123420565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:40,667 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:40,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123420666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:40,667 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:40,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123420666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:40,670 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:40,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123420668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:40,673 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:40,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123420670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:40,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:40,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123420868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:40,871 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:40,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123420869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:40,874 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:40,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123420872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:40,876 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:40,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123420875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:40,915 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=126 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/f1b2622e8ba141dda6a7ee644fc58644 2024-11-20T17:22:40,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/ba91fb3fe52248f88e7cd5751366422a is 50, key is test_row_0/C:col10/1732123358389/Put/seqid=0 2024-11-20T17:22:40,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742191_1367 (size=12001) 2024-11-20T17:22:40,929 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=126 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/ba91fb3fe52248f88e7cd5751366422a 2024-11-20T17:22:40,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/733c30328fb04be18afa2971cba6669c as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/733c30328fb04be18afa2971cba6669c 2024-11-20T17:22:40,937 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/733c30328fb04be18afa2971cba6669c, entries=150, sequenceid=126, filesize=30.2 K 2024-11-20T17:22:40,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/f1b2622e8ba141dda6a7ee644fc58644 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/f1b2622e8ba141dda6a7ee644fc58644 2024-11-20T17:22:40,942 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/f1b2622e8ba141dda6a7ee644fc58644, entries=150, sequenceid=126, filesize=11.7 K 2024-11-20T17:22:40,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/ba91fb3fe52248f88e7cd5751366422a as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/ba91fb3fe52248f88e7cd5751366422a 2024-11-20T17:22:40,946 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/ba91fb3fe52248f88e7cd5751366422a, entries=150, sequenceid=126, filesize=11.7 K 2024-11-20T17:22:40,947 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for ac48603af62441d2defd4d588b3226cb in 1282ms, sequenceid=126, compaction requested=false 2024-11-20T17:22:40,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2538): Flush status journal for ac48603af62441d2defd4d588b3226cb: 2024-11-20T17:22:40,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 
2024-11-20T17:22:40,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=113 2024-11-20T17:22:40,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=113 2024-11-20T17:22:40,950 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-11-20T17:22:40,950 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8920 sec 2024-11-20T17:22:40,951 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees in 1.8950 sec 2024-11-20T17:22:41,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-20T17:22:41,160 INFO [Thread-1528 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 112 completed 2024-11-20T17:22:41,161 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:22:41,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees 2024-11-20T17:22:41,163 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:22:41,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T17:22:41,163 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:22:41,163 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:22:41,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:41,176 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ac48603af62441d2defd4d588b3226cb 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T17:22:41,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=A 2024-11-20T17:22:41,178 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:41,178 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=B 2024-11-20T17:22:41,178 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new 
segment=null 2024-11-20T17:22:41,178 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=C 2024-11-20T17:22:41,178 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:41,185 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120946cb806e35a401aa5ebd55837376e9b_ac48603af62441d2defd4d588b3226cb is 50, key is test_row_0/A:col10/1732123361176/Put/seqid=0 2024-11-20T17:22:41,189 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:41,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123421184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:41,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742192_1368 (size=17284) 2024-11-20T17:22:41,192 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:41,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123421188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:41,192 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:41,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123421189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:41,193 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:41,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123421189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:41,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T17:22:41,291 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:41,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123421290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:41,296 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:41,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123421293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:41,297 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:41,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123421293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:41,297 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:41,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123421293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:41,315 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:41,315 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T17:22:41,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:41,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. as already flushing 2024-11-20T17:22:41,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:41,316 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:22:41,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:41,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:41,402 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:41,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32928 deadline: 1732123421398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:41,402 DEBUG [Thread-1524 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4153 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb., hostname=d514dc944523,40121,1732123262111, seqNum=5, see https://s.apache.org/timeout, 
exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:22:41,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T17:22:41,467 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:41,468 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T17:22:41,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:41,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. as already flushing 2024-11-20T17:22:41,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:41,468 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:41,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:41,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:41,494 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:41,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123421493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:41,501 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:41,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123421499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:41,502 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:41,502 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:41,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123421499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:41,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123421499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:41,592 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:22:41,596 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120946cb806e35a401aa5ebd55837376e9b_ac48603af62441d2defd4d588b3226cb to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120946cb806e35a401aa5ebd55837376e9b_ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:41,597 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/5af52cbab59948a4ae7b757a84449fcd, store: [table=TestAcidGuarantees family=A region=ac48603af62441d2defd4d588b3226cb] 2024-11-20T17:22:41,597 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/5af52cbab59948a4ae7b757a84449fcd is 175, key is test_row_0/A:col10/1732123361176/Put/seqid=0 2024-11-20T17:22:41,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742193_1369 (size=48389) 2024-11-20T17:22:41,601 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=155, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/5af52cbab59948a4ae7b757a84449fcd 2024-11-20T17:22:41,606 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/0bab8670a844488691434327f8801431 is 50, key is test_row_0/B:col10/1732123361176/Put/seqid=0 2024-11-20T17:22:41,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742194_1370 (size=12151) 2024-11-20T17:22:41,620 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:41,621 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T17:22:41,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:41,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. as already flushing 2024-11-20T17:22:41,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:41,621 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:41,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:41,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:22:41,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T17:22:41,773 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:41,773 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T17:22:41,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:41,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. as already flushing 2024-11-20T17:22:41,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:41,773 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:41,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:41,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:41,801 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:41,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123421797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:41,806 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:41,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123421803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:41,806 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:41,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123421803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:41,807 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:41,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123421804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:41,925 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:41,926 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T17:22:41,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:41,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. as already flushing 2024-11-20T17:22:41,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:41,926 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:22:41,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:41,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:42,011 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/0bab8670a844488691434327f8801431 2024-11-20T17:22:42,017 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/281f923bb9bb4700adb2e49f5497154e is 50, key is test_row_0/C:col10/1732123361176/Put/seqid=0 2024-11-20T17:22:42,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742195_1371 (size=12151) 2024-11-20T17:22:42,078 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:42,078 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T17:22:42,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:42,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. as already flushing 2024-11-20T17:22:42,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:42,078 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:22:42,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:42,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:42,230 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:42,231 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T17:22:42,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:42,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. as already flushing 2024-11-20T17:22:42,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:42,231 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:42,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:42,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:42,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T17:22:42,305 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:42,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123422302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:42,310 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:42,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123422307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:42,310 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:42,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123422309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:42,313 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:42,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123422311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:42,383 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:42,384 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T17:22:42,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:42,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. as already flushing 2024-11-20T17:22:42,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:42,384 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:22:42,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:42,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:42,422 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/281f923bb9bb4700adb2e49f5497154e 2024-11-20T17:22:42,427 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/5af52cbab59948a4ae7b757a84449fcd as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/5af52cbab59948a4ae7b757a84449fcd 2024-11-20T17:22:42,430 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/5af52cbab59948a4ae7b757a84449fcd, entries=250, sequenceid=155, filesize=47.3 K 2024-11-20T17:22:42,431 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/0bab8670a844488691434327f8801431 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/0bab8670a844488691434327f8801431 2024-11-20T17:22:42,434 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/0bab8670a844488691434327f8801431, entries=150, sequenceid=155, filesize=11.9 K 2024-11-20T17:22:42,435 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/281f923bb9bb4700adb2e49f5497154e as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/281f923bb9bb4700adb2e49f5497154e 2024-11-20T17:22:42,438 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/281f923bb9bb4700adb2e49f5497154e, entries=150, sequenceid=155, filesize=11.9 K 2024-11-20T17:22:42,439 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for ac48603af62441d2defd4d588b3226cb in 1262ms, sequenceid=155, compaction requested=true 2024-11-20T17:22:42,439 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ac48603af62441d2defd4d588b3226cb: 2024-11-20T17:22:42,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
ac48603af62441d2defd4d588b3226cb:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:22:42,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:42,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ac48603af62441d2defd4d588b3226cb:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:22:42,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:42,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ac48603af62441d2defd4d588b3226cb:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:22:42,439 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:22:42,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:22:42,439 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:22:42,440 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:22:42,440 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110505 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:22:42,440 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): ac48603af62441d2defd4d588b3226cb/B is initiating minor compaction (all files) 2024-11-20T17:22:42,440 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): ac48603af62441d2defd4d588b3226cb/A is initiating minor compaction (all files) 2024-11-20T17:22:42,440 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ac48603af62441d2defd4d588b3226cb/B in TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:42,440 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ac48603af62441d2defd4d588b3226cb/A in TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 
2024-11-20T17:22:42,440 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/e3c0b45fc9af43c9b2c685bf23377bd2, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/f1b2622e8ba141dda6a7ee644fc58644, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/0bab8670a844488691434327f8801431] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp, totalSize=35.5 K 2024-11-20T17:22:42,440 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/6f692dd884c84735ac2f673805b2644f, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/733c30328fb04be18afa2971cba6669c, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/5af52cbab59948a4ae7b757a84449fcd] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp, totalSize=107.9 K 2024-11-20T17:22:42,440 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:42,440 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 
files: [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/6f692dd884c84735ac2f673805b2644f, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/733c30328fb04be18afa2971cba6669c, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/5af52cbab59948a4ae7b757a84449fcd] 2024-11-20T17:22:42,440 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting e3c0b45fc9af43c9b2c685bf23377bd2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1732123357254 2024-11-20T17:22:42,440 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6f692dd884c84735ac2f673805b2644f, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1732123357254 2024-11-20T17:22:42,441 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting f1b2622e8ba141dda6a7ee644fc58644, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1732123358389 2024-11-20T17:22:42,441 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 733c30328fb04be18afa2971cba6669c, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1732123358389 2024-11-20T17:22:42,441 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 0bab8670a844488691434327f8801431, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732123360564 2024-11-20T17:22:42,441 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5af52cbab59948a4ae7b757a84449fcd, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732123360558 2024-11-20T17:22:42,449 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=ac48603af62441d2defd4d588b3226cb] 2024-11-20T17:22:42,449 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ac48603af62441d2defd4d588b3226cb#B#compaction#316 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:22:42,450 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/78871194c43748a2a5c4369a8bb880ed is 50, key is test_row_0/B:col10/1732123361176/Put/seqid=0 2024-11-20T17:22:42,452 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112043858aa738f743fea4e9fdca62d5cbf1_ac48603af62441d2defd4d588b3226cb store=[table=TestAcidGuarantees family=A region=ac48603af62441d2defd4d588b3226cb] 2024-11-20T17:22:42,453 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112043858aa738f743fea4e9fdca62d5cbf1_ac48603af62441d2defd4d588b3226cb, store=[table=TestAcidGuarantees family=A region=ac48603af62441d2defd4d588b3226cb] 2024-11-20T17:22:42,453 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112043858aa738f743fea4e9fdca62d5cbf1_ac48603af62441d2defd4d588b3226cb because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=ac48603af62441d2defd4d588b3226cb] 2024-11-20T17:22:42,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742196_1372 (size=12459) 2024-11-20T17:22:42,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742197_1373 (size=4469) 2024-11-20T17:22:42,536 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:42,537 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T17:22:42,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 
2024-11-20T17:22:42,537 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2837): Flushing ac48603af62441d2defd4d588b3226cb 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T17:22:42,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=A 2024-11-20T17:22:42,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:42,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=B 2024-11-20T17:22:42,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:42,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=C 2024-11-20T17:22:42,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:42,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120fd11a84b8b934dfa9f76e4f77fc2c29c_ac48603af62441d2defd4d588b3226cb is 50, key is test_row_0/A:col10/1732123361188/Put/seqid=0 2024-11-20T17:22:42,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742198_1374 (size=12304) 2024-11-20T17:22:42,861 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/78871194c43748a2a5c4369a8bb880ed as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/78871194c43748a2a5c4369a8bb880ed 2024-11-20T17:22:42,865 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ac48603af62441d2defd4d588b3226cb/B of ac48603af62441d2defd4d588b3226cb into 78871194c43748a2a5c4369a8bb880ed(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
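The flush above drains column families A, B and C of region ac48603af62441d2defd4d588b3226cb, and the temporary HFiles carry cells such as test_row_0/A:col10. A hedged sketch of the kind of write that produces those cells; the row, families and qualifier come from the log, while the payload size and connection handling are assumptions:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutSketch {
      static void writeRow(Connection conn) throws Exception {
        try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          byte[] value = new byte[32]; // arbitrary payload, for illustration only
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
          put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), value);
          put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), value);
          // Lands in the memstore; a later flush writes it to HFiles as in the log above.
          table.put(put);
        }
      }
    }
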
2024-11-20T17:22:42,865 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ac48603af62441d2defd4d588b3226cb: 2024-11-20T17:22:42,865 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb., storeName=ac48603af62441d2defd4d588b3226cb/B, priority=13, startTime=1732123362439; duration=0sec 2024-11-20T17:22:42,865 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:22:42,865 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ac48603af62441d2defd4d588b3226cb:B 2024-11-20T17:22:42,865 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:22:42,866 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ac48603af62441d2defd4d588b3226cb#A#compaction#317 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:22:42,866 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:22:42,866 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): ac48603af62441d2defd4d588b3226cb/C is initiating minor compaction (all files) 2024-11-20T17:22:42,866 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ac48603af62441d2defd4d588b3226cb/C in TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 
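The minor compaction of store C above was selected automatically by the ExploringCompactionPolicy (3 eligible files, ~35.5 K total). For completeness, a compaction of the same table or family can also be requested explicitly through the Admin API; a minimal sketch, with only the table and family names taken from the log:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CompactRequestSketch {
      static void requestCompaction(Admin admin) throws Exception {
        // Ask for a (minor) compaction of family C, the store being compacted above.
        admin.compact(TableName.valueOf("TestAcidGuarantees"), Bytes.toBytes("C"));
        // Or force a major compaction of the whole table.
        admin.majorCompact(TableName.valueOf("TestAcidGuarantees"));
      }
    }
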
2024-11-20T17:22:42,867 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/f7a2d53336c240cc92f7ac478ca6dba3, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/ba91fb3fe52248f88e7cd5751366422a, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/281f923bb9bb4700adb2e49f5497154e] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp, totalSize=35.5 K 2024-11-20T17:22:42,867 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/4c1be3bf4ebc4311be0f23ac8b9f6f9b is 175, key is test_row_0/A:col10/1732123361176/Put/seqid=0 2024-11-20T17:22:42,867 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting f7a2d53336c240cc92f7ac478ca6dba3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1732123357254 2024-11-20T17:22:42,867 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting ba91fb3fe52248f88e7cd5751366422a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1732123358389 2024-11-20T17:22:42,868 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 281f923bb9bb4700adb2e49f5497154e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732123360564 2024-11-20T17:22:42,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742199_1375 (size=31413) 2024-11-20T17:22:42,877 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/4c1be3bf4ebc4311be0f23ac8b9f6f9b as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/4c1be3bf4ebc4311be0f23ac8b9f6f9b 2024-11-20T17:22:42,878 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ac48603af62441d2defd4d588b3226cb#C#compaction#319 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:22:42,878 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/831dd30617fb451cb54d6b61695a640b is 50, key is test_row_0/C:col10/1732123361176/Put/seqid=0 2024-11-20T17:22:42,884 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ac48603af62441d2defd4d588b3226cb/A of ac48603af62441d2defd4d588b3226cb into 4c1be3bf4ebc4311be0f23ac8b9f6f9b(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:22:42,884 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ac48603af62441d2defd4d588b3226cb: 2024-11-20T17:22:42,884 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb., storeName=ac48603af62441d2defd4d588b3226cb/A, priority=13, startTime=1732123362439; duration=0sec 2024-11-20T17:22:42,885 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:42,885 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ac48603af62441d2defd4d588b3226cb:A 2024-11-20T17:22:42,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742200_1376 (size=12459) 2024-11-20T17:22:42,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:22:42,953 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120fd11a84b8b934dfa9f76e4f77fc2c29c_ac48603af62441d2defd4d588b3226cb to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120fd11a84b8b934dfa9f76e4f77fc2c29c_ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:42,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/30c6b37a9c43471fb81af1a6964e2e54, store: [table=TestAcidGuarantees family=A region=ac48603af62441d2defd4d588b3226cb] 2024-11-20T17:22:42,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/30c6b37a9c43471fb81af1a6964e2e54 is 175, key is test_row_0/A:col10/1732123361188/Put/seqid=0 2024-11-20T17:22:42,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742201_1377 (size=31105) 2024-11-20T17:22:42,960 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=166, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/30c6b37a9c43471fb81af1a6964e2e54 2024-11-20T17:22:42,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/88c0652c19bc4914abffbca9ff19201d is 50, key is test_row_0/B:col10/1732123361188/Put/seqid=0 2024-11-20T17:22:42,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742202_1378 (size=12151) 2024-11-20T17:22:43,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T17:22:43,294 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/831dd30617fb451cb54d6b61695a640b as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/831dd30617fb451cb54d6b61695a640b 2024-11-20T17:22:43,299 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ac48603af62441d2defd4d588b3226cb/C of ac48603af62441d2defd4d588b3226cb into 831dd30617fb451cb54d6b61695a640b(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
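The RegionTooBusyException warnings that follow are raised when the region's memstore exceeds its blocking size, which HBase derives as the flush size multiplied by the block multiplier. A minimal sketch, assuming the standard configuration keys, of settings that would yield a small limit like the 512.0 K reported below; these values are not taken from the test itself:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimitSketch {
      static Configuration smallMemStoreConf() {
        Configuration conf = HBaseConfiguration.create();
        // Blocking limit = flush size * block multiplier; 128 K * 4 = 512 K (illustrative).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        return conf;
      }
    }
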
2024-11-20T17:22:43,299 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ac48603af62441d2defd4d588b3226cb: 2024-11-20T17:22:43,299 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb., storeName=ac48603af62441d2defd4d588b3226cb/C, priority=13, startTime=1732123362439; duration=0sec 2024-11-20T17:22:43,299 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:43,299 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ac48603af62441d2defd4d588b3226cb:C 2024-11-20T17:22:43,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:43,310 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. as already flushing 2024-11-20T17:22:43,349 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:43,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123423344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:43,352 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:43,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123423345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:43,352 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:43,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123423345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:43,352 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:43,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123423346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:43,374 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/88c0652c19bc4914abffbca9ff19201d 2024-11-20T17:22:43,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/faa6e885179c443e804103f591befcc9 is 50, key is test_row_0/C:col10/1732123361188/Put/seqid=0 2024-11-20T17:22:43,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742203_1379 (size=12151) 2024-11-20T17:22:43,454 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:43,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123423451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:43,457 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:43,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123423453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:43,458 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:43,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123423453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:43,458 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:43,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123423455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:43,656 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:43,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123423656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:43,662 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:43,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123423659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:43,662 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:43,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123423659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:43,663 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:43,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123423660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:43,786 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/faa6e885179c443e804103f591befcc9 2024-11-20T17:22:43,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/30c6b37a9c43471fb81af1a6964e2e54 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/30c6b37a9c43471fb81af1a6964e2e54 2024-11-20T17:22:43,802 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/30c6b37a9c43471fb81af1a6964e2e54, entries=150, sequenceid=166, filesize=30.4 K 2024-11-20T17:22:43,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/88c0652c19bc4914abffbca9ff19201d as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/88c0652c19bc4914abffbca9ff19201d 2024-11-20T17:22:43,806 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/88c0652c19bc4914abffbca9ff19201d, entries=150, sequenceid=166, filesize=11.9 K 2024-11-20T17:22:43,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/faa6e885179c443e804103f591befcc9 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/faa6e885179c443e804103f591befcc9 2024-11-20T17:22:43,813 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/faa6e885179c443e804103f591befcc9, entries=150, sequenceid=166, filesize=11.9 K 2024-11-20T17:22:43,815 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for ac48603af62441d2defd4d588b3226cb in 1277ms, sequenceid=166, compaction requested=false 2024-11-20T17:22:43,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2538): Flush status journal for ac48603af62441d2defd4d588b3226cb: 2024-11-20T17:22:43,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 
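While the flush above was in flight, client mutations were repeatedly rejected with RegionTooBusyException (callIds 102-121). The HBase client normally retries this internally, so the following is only an illustrative sketch of explicit backoff handling; the attempt count and sleep values are arbitrary, and in practice the exception may surface wrapped in a retries-exhausted error rather than directly:

    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    public class BusyRegionRetrySketch {
      static void putWithRetry(Table table, Put put) throws Exception {
        int attempts = 0;
        while (true) {
          try {
            table.put(put);
            return;
          } catch (RegionTooBusyException e) {
            // Memstore is over its blocking limit; wait for the in-flight flush to drain it.
            if (++attempts >= 5) {
              throw e;
            }
            Thread.sleep(100L * attempts);
          }
        }
      }
    }
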
2024-11-20T17:22:43,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=115 2024-11-20T17:22:43,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=115 2024-11-20T17:22:43,820 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114 2024-11-20T17:22:43,821 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6530 sec 2024-11-20T17:22:43,822 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees in 2.6600 sec 2024-11-20T17:22:43,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:43,963 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ac48603af62441d2defd4d588b3226cb 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T17:22:43,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=A 2024-11-20T17:22:43,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:43,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=B 2024-11-20T17:22:43,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:43,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=C 2024-11-20T17:22:43,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:43,970 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205e6b645b975c497e9a22f7872a570ff1_ac48603af62441d2defd4d588b3226cb is 50, key is test_row_0/A:col10/1732123363961/Put/seqid=0 2024-11-20T17:22:43,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742204_1380 (size=14794) 2024-11-20T17:22:43,978 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:43,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123423973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:43,980 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:43,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123423977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:43,981 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:43,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123423977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:43,982 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:43,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123423978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:44,081 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:44,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123424079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:44,083 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:44,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123424081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:44,084 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:44,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123424082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:44,085 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:44,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123424083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:44,285 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:44,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123424283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:44,289 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:44,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123424285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:44,290 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:44,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123424285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:44,291 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:44,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123424287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:44,375 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:22:44,378 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205e6b645b975c497e9a22f7872a570ff1_ac48603af62441d2defd4d588b3226cb to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205e6b645b975c497e9a22f7872a570ff1_ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:44,379 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/e6713f07724a4ec5beaf50dbaba89cd5, store: [table=TestAcidGuarantees family=A region=ac48603af62441d2defd4d588b3226cb] 2024-11-20T17:22:44,380 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/e6713f07724a4ec5beaf50dbaba89cd5 is 175, key is test_row_0/A:col10/1732123363961/Put/seqid=0 2024-11-20T17:22:44,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742205_1381 (size=39749) 2024-11-20T17:22:44,589 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:44,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123424587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:44,594 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:44,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123424592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:44,595 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:44,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123424592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:44,595 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:44,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123424593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:44,788 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=195, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/e6713f07724a4ec5beaf50dbaba89cd5 2024-11-20T17:22:44,799 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/c3e61e4d0f8b432496017273591a2880 is 50, key is test_row_0/B:col10/1732123363961/Put/seqid=0 2024-11-20T17:22:44,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742206_1382 (size=12151) 2024-11-20T17:22:45,098 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:45,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123425094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:45,098 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:45,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123425095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:45,098 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:45,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123425098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:45,103 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:45,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123425102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:45,206 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/c3e61e4d0f8b432496017273591a2880 2024-11-20T17:22:45,214 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/460d97f51dbb4dbaa6fa4fe92e5f9643 is 50, key is test_row_0/C:col10/1732123363961/Put/seqid=0 2024-11-20T17:22:45,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742207_1383 (size=12151) 2024-11-20T17:22:45,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T17:22:45,268 INFO [Thread-1528 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 114 completed 2024-11-20T17:22:45,269 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:22:45,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees 2024-11-20T17:22:45,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-20T17:22:45,271 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:22:45,271 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:22:45,271 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
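The burst of RegionTooBusyException entries above comes from HRegion.checkResources(), which rejects incoming mutations once the region's memstore grows past its blocking limit (reported here as 512.0 K). That limit is the product of the per-region memstore flush size and the block multiplier. The sketch below only illustrates that relationship: the two property keys are the standard HBase settings, but the concrete values are assumptions picked to reproduce the 512 K figure from this run, not values read out of the test.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Standard HBase keys; the values are assumed, test-sized settings (defaults are much larger).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024); // flush threshold per region (assumed 128 K)
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // writes block at multiplier * flush size

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 0L);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = flushSize * multiplier; // 128 K * 4 = 512 K, matching the limit in the log
    System.out.println("Writes fail with RegionTooBusyException above ~" + blockingLimit + " bytes per region");
  }
}
```

Once the MemStoreFlusher (the FLUSH lines interleaved above and below) drains the memstore back under this limit, the same mutations go through on retry.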
2024-11-20T17:22:45,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-20T17:22:45,423 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:45,423 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-20T17:22:45,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:45,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. as already flushing 2024-11-20T17:22:45,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:45,424 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:45,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:45,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:45,444 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:45,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32928 deadline: 1732123425441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:45,445 DEBUG [Thread-1524 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8196 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb., hostname=d514dc944523,40121,1732123262111, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) 
at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:22:45,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-20T17:22:45,576 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:45,576 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-20T17:22:45,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 
{event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:45,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. as already flushing 2024-11-20T17:22:45,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:45,577 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:45,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:22:45,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
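The RpcRetryingCallerImpl entry above (tries=7, retries=16, started=8196 ms ago) is the writer thread in AcidGuaranteesTestTool backing off and re-sending the same Mutate while the region is blocked, and the parallel "Unable to complete flush … as already flushing" failures are the master's FlushRegionProcedure (pid=117) being bounced because the MemStoreFlusher already has a flush of this region in flight; the procedure is simply re-dispatched, as the second FlushRegionCallable attempt above shows. The following is a minimal, hand-rolled sketch of that client-side retry behaviour, assuming a standard HBase 2.x client; in practice the client does this internally, driven by hbase.client.retries.number and hbase.client.pause, and the table, family, and row names are just the ones that appear in this log.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_2"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("dummy-value"));
      long pauseMs = 100;                                // analogous to hbase.client.pause
      for (int attempt = 1; attempt <= 16; attempt++) {  // analogous to hbase.client.retries.number
        try {
          table.put(put);                                // fails while the memstore is over its blocking limit
          return;
        } catch (IOException e) {                        // RegionTooBusyException is an IOException subclass
          if (attempt == 16) throw e;                    // give up after the configured number of tries
          Thread.sleep(pauseMs);
          pauseMs = Math.min(pauseMs * 2, 10_000);       // back off and let the in-flight flush catch up
        }
      }
    }
  }
}
```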
2024-11-20T17:22:45,619 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/460d97f51dbb4dbaa6fa4fe92e5f9643 2024-11-20T17:22:45,624 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/e6713f07724a4ec5beaf50dbaba89cd5 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/e6713f07724a4ec5beaf50dbaba89cd5 2024-11-20T17:22:45,627 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/e6713f07724a4ec5beaf50dbaba89cd5, entries=200, sequenceid=195, filesize=38.8 K 2024-11-20T17:22:45,628 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/c3e61e4d0f8b432496017273591a2880 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/c3e61e4d0f8b432496017273591a2880 2024-11-20T17:22:45,631 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/c3e61e4d0f8b432496017273591a2880, entries=150, sequenceid=195, filesize=11.9 K 2024-11-20T17:22:45,632 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/460d97f51dbb4dbaa6fa4fe92e5f9643 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/460d97f51dbb4dbaa6fa4fe92e5f9643 2024-11-20T17:22:45,636 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/460d97f51dbb4dbaa6fa4fe92e5f9643, entries=150, sequenceid=195, filesize=11.9 K 2024-11-20T17:22:45,637 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for ac48603af62441d2defd4d588b3226cb in 1674ms, sequenceid=195, compaction requested=true 2024-11-20T17:22:45,637 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ac48603af62441d2defd4d588b3226cb: 2024-11-20T17:22:45,638 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:22:45,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ac48603af62441d2defd4d588b3226cb:A, priority=-2147483648, 
current under compaction store size is 1 2024-11-20T17:22:45,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:45,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ac48603af62441d2defd4d588b3226cb:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:22:45,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:45,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ac48603af62441d2defd4d588b3226cb:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:22:45,638 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:22:45,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:22:45,639 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102267 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:22:45,639 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): ac48603af62441d2defd4d588b3226cb/A is initiating minor compaction (all files) 2024-11-20T17:22:45,639 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ac48603af62441d2defd4d588b3226cb/A in TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:45,639 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/4c1be3bf4ebc4311be0f23ac8b9f6f9b, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/30c6b37a9c43471fb81af1a6964e2e54, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/e6713f07724a4ec5beaf50dbaba89cd5] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp, totalSize=99.9 K 2024-11-20T17:22:45,639 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:45,639 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 
files: [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/4c1be3bf4ebc4311be0f23ac8b9f6f9b, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/30c6b37a9c43471fb81af1a6964e2e54, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/e6713f07724a4ec5beaf50dbaba89cd5] 2024-11-20T17:22:45,639 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4c1be3bf4ebc4311be0f23ac8b9f6f9b, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732123360564 2024-11-20T17:22:45,640 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:22:45,640 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): ac48603af62441d2defd4d588b3226cb/B is initiating minor compaction (all files) 2024-11-20T17:22:45,640 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ac48603af62441d2defd4d588b3226cb/B in TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:45,640 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 30c6b37a9c43471fb81af1a6964e2e54, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1732123361183 2024-11-20T17:22:45,640 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/78871194c43748a2a5c4369a8bb880ed, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/88c0652c19bc4914abffbca9ff19201d, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/c3e61e4d0f8b432496017273591a2880] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp, totalSize=35.9 K 2024-11-20T17:22:45,640 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting e6713f07724a4ec5beaf50dbaba89cd5, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732123363344 2024-11-20T17:22:45,640 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 78871194c43748a2a5c4369a8bb880ed, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732123360564 2024-11-20T17:22:45,641 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 88c0652c19bc4914abffbca9ff19201d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1732123361183 2024-11-20T17:22:45,641 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 
c3e61e4d0f8b432496017273591a2880, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732123363344 2024-11-20T17:22:45,652 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=ac48603af62441d2defd4d588b3226cb] 2024-11-20T17:22:45,654 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ac48603af62441d2defd4d588b3226cb#B#compaction#326 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:22:45,655 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/5ef2a5c68c0a46b9a5a35944d59244bb is 50, key is test_row_0/B:col10/1732123363961/Put/seqid=0 2024-11-20T17:22:45,660 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411201b8f89c1ccfe4cab85956e5181918754_ac48603af62441d2defd4d588b3226cb store=[table=TestAcidGuarantees family=A region=ac48603af62441d2defd4d588b3226cb] 2024-11-20T17:22:45,664 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411201b8f89c1ccfe4cab85956e5181918754_ac48603af62441d2defd4d588b3226cb, store=[table=TestAcidGuarantees family=A region=ac48603af62441d2defd4d588b3226cb] 2024-11-20T17:22:45,664 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201b8f89c1ccfe4cab85956e5181918754_ac48603af62441d2defd4d588b3226cb because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=ac48603af62441d2defd4d588b3226cb] 2024-11-20T17:22:45,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742209_1385 (size=4469) 2024-11-20T17:22:45,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742208_1384 (size=12561) 2024-11-20T17:22:45,675 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/5ef2a5c68c0a46b9a5a35944d59244bb as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/5ef2a5c68c0a46b9a5a35944d59244bb 2024-11-20T17:22:45,682 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ac48603af62441d2defd4d588b3226cb/B of ac48603af62441d2defd4d588b3226cb into 5ef2a5c68c0a46b9a5a35944d59244bb(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
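The entries above show the region server's own CompactSplit/ExploringCompactionPolicy picking all three flushed files per store and the throughput controller capping the rewrite at 50 MB/second. As a point of reference only (this is a hedged sketch, not part of TestAcidGuarantees), a client could request the same kind of compaction explicitly through the public Admin API and poll until the region server reports the stores idle; the class name below is illustrative and the cluster connection is assumed to come from an hbase-site.xml on the classpath.

```java
// Sketch: ask for a major compaction of the table seen in the log and wait for it to finish.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestCompaction {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();          // assumes hbase-site.xml is available
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            admin.majorCompact(table);                              // asynchronous, like the compact marks above
            while (admin.getCompactionState(table) != CompactionState.NONE) {
                Thread.sleep(1000);                                 // poll until no store reports a running compaction
            }
        }
    }
}
```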
2024-11-20T17:22:45,682 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ac48603af62441d2defd4d588b3226cb: 2024-11-20T17:22:45,683 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb., storeName=ac48603af62441d2defd4d588b3226cb/B, priority=13, startTime=1732123365638; duration=0sec 2024-11-20T17:22:45,683 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:22:45,683 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ac48603af62441d2defd4d588b3226cb:B 2024-11-20T17:22:45,683 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:22:45,684 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:22:45,684 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): ac48603af62441d2defd4d588b3226cb/C is initiating minor compaction (all files) 2024-11-20T17:22:45,684 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ac48603af62441d2defd4d588b3226cb/C in TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:45,684 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/831dd30617fb451cb54d6b61695a640b, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/faa6e885179c443e804103f591befcc9, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/460d97f51dbb4dbaa6fa4fe92e5f9643] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp, totalSize=35.9 K 2024-11-20T17:22:45,685 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 831dd30617fb451cb54d6b61695a640b, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732123360564 2024-11-20T17:22:45,685 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting faa6e885179c443e804103f591befcc9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1732123361183 2024-11-20T17:22:45,685 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 460d97f51dbb4dbaa6fa4fe92e5f9643, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732123363344 2024-11-20T17:22:45,692 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
ac48603af62441d2defd4d588b3226cb#C#compaction#327 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:22:45,692 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/c19c7519d98c4a26b804ac7a8b69f7a4 is 50, key is test_row_0/C:col10/1732123363961/Put/seqid=0 2024-11-20T17:22:45,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742210_1386 (size=12561) 2024-11-20T17:22:45,701 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/c19c7519d98c4a26b804ac7a8b69f7a4 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/c19c7519d98c4a26b804ac7a8b69f7a4 2024-11-20T17:22:45,706 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ac48603af62441d2defd4d588b3226cb/C of ac48603af62441d2defd4d588b3226cb into c19c7519d98c4a26b804ac7a8b69f7a4(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:22:45,706 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ac48603af62441d2defd4d588b3226cb: 2024-11-20T17:22:45,706 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb., storeName=ac48603af62441d2defd4d588b3226cb/C, priority=13, startTime=1732123365638; duration=0sec 2024-11-20T17:22:45,706 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:45,706 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ac48603af62441d2defd4d588b3226cb:C 2024-11-20T17:22:45,729 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:45,729 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-20T17:22:45,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 
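The FlushRegionCallable for pid=117 that starts here is dispatched by the master-side flush procedure (pid=116). As a hedged illustration of how that path is normally reached from a client (an assumption about how the test's flusher drives it, not a verbatim excerpt of the test), a single Admin#flush call on the table is enough to produce the FlushTableProcedure / FlushRegionProcedure pair visible in this log:

```java
// Sketch: request a flush of all stores (A, B, C) of every region of the table.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestFlush {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();           // assumes a reachable cluster
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Flushes the in-memory memstores to new HFiles, as seen in the DefaultStoreFlusher lines.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```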
2024-11-20T17:22:45,729 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2837): Flushing ac48603af62441d2defd4d588b3226cb 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-20T17:22:45,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=A 2024-11-20T17:22:45,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:45,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=B 2024-11-20T17:22:45,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:45,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=C 2024-11-20T17:22:45,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:45,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112053bb73e19cc747cf9eb8fe7ca1389777_ac48603af62441d2defd4d588b3226cb is 50, key is test_row_0/A:col10/1732123363976/Put/seqid=0 2024-11-20T17:22:45,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742211_1387 (size=12304) 2024-11-20T17:22:45,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:22:45,748 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112053bb73e19cc747cf9eb8fe7ca1389777_ac48603af62441d2defd4d588b3226cb to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112053bb73e19cc747cf9eb8fe7ca1389777_ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:45,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/4b8372eb614e4c95aea285e580191bf5, store: [table=TestAcidGuarantees family=A region=ac48603af62441d2defd4d588b3226cb] 2024-11-20T17:22:45,750 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/4b8372eb614e4c95aea285e580191bf5 is 175, key is test_row_0/A:col10/1732123363976/Put/seqid=0 2024-11-20T17:22:45,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742212_1388 (size=31105) 2024-11-20T17:22:45,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-20T17:22:46,070 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ac48603af62441d2defd4d588b3226cb#A#compaction#325 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:22:46,070 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/d72fc5190a144efca8239158596e55b1 is 175, key is test_row_0/A:col10/1732123363961/Put/seqid=0 2024-11-20T17:22:46,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742213_1389 (size=31515) 2024-11-20T17:22:46,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:46,109 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. as already flushing 2024-11-20T17:22:46,142 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:46,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123426137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:46,142 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:46,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123426138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:46,143 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:46,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123426141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:46,147 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:46,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123426142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:46,155 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=207, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/4b8372eb614e4c95aea285e580191bf5 2024-11-20T17:22:46,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/f0c426eb96044fc69746dbf21371e56d is 50, key is test_row_0/B:col10/1732123363976/Put/seqid=0 2024-11-20T17:22:46,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742214_1390 (size=12151) 2024-11-20T17:22:46,246 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:46,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123426243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:46,246 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:46,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123426243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:46,247 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:46,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123426243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:46,251 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:46,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123426247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:46,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-20T17:22:46,452 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:46,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123426448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:46,453 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:46,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123426448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:46,453 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:46,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123426448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:46,456 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:46,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123426453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:46,478 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/d72fc5190a144efca8239158596e55b1 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/d72fc5190a144efca8239158596e55b1 2024-11-20T17:22:46,482 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ac48603af62441d2defd4d588b3226cb/A of ac48603af62441d2defd4d588b3226cb into d72fc5190a144efca8239158596e55b1(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
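The repeated RegionTooBusyException warnings around this point come from HRegion.checkResources rejecting writes while the region's memstore is above its blocking size (presumably the test's reduced hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier, giving the 512.0 K limit in the messages). The sketch below is an assumption about how a writer can tolerate this condition, not the test's own code: it backs off and retries while the flushes seen above drain the memstore, which is also what the standard HBase client retry policy does internally.

```java
// Sketch: a put that backs off when the region reports "Over memstore limit".
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoff {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();           // assumes a reachable cluster
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    break;                                           // accepted once the memstore drops below the limit
                } catch (IOException ioe) {
                    // The busy signal may arrive directly or wrapped by the client's retry machinery.
                    boolean tooBusy = ioe instanceof RegionTooBusyException
                        || ioe.getCause() instanceof RegionTooBusyException;
                    if (!tooBusy || attempt >= 5) throw ioe;         // give up after a few tries
                    Thread.sleep(200L * attempt);                    // simple backoff while flushes catch up
                }
            }
        }
    }
}
```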
2024-11-20T17:22:46,482 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ac48603af62441d2defd4d588b3226cb: 2024-11-20T17:22:46,482 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb., storeName=ac48603af62441d2defd4d588b3226cb/A, priority=13, startTime=1732123365637; duration=0sec 2024-11-20T17:22:46,482 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:46,482 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ac48603af62441d2defd4d588b3226cb:A 2024-11-20T17:22:46,565 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/f0c426eb96044fc69746dbf21371e56d 2024-11-20T17:22:46,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/5f0ab29f95e740a9a3b3a0cdbeed5341 is 50, key is test_row_0/C:col10/1732123363976/Put/seqid=0 2024-11-20T17:22:46,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742215_1391 (size=12151) 2024-11-20T17:22:46,756 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:46,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123426754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:46,757 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:46,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123426754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:46,757 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:46,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123426756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:46,761 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:46,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123426759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:46,977 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/5f0ab29f95e740a9a3b3a0cdbeed5341 2024-11-20T17:22:46,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/4b8372eb614e4c95aea285e580191bf5 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/4b8372eb614e4c95aea285e580191bf5 2024-11-20T17:22:46,984 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/4b8372eb614e4c95aea285e580191bf5, entries=150, sequenceid=207, filesize=30.4 K 2024-11-20T17:22:46,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/f0c426eb96044fc69746dbf21371e56d as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/f0c426eb96044fc69746dbf21371e56d 2024-11-20T17:22:46,988 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/f0c426eb96044fc69746dbf21371e56d, entries=150, sequenceid=207, filesize=11.9 K 2024-11-20T17:22:46,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/5f0ab29f95e740a9a3b3a0cdbeed5341 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/5f0ab29f95e740a9a3b3a0cdbeed5341 2024-11-20T17:22:46,992 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/5f0ab29f95e740a9a3b3a0cdbeed5341, entries=150, sequenceid=207, filesize=11.9 K 2024-11-20T17:22:46,993 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for ac48603af62441d2defd4d588b3226cb in 1264ms, sequenceid=207, compaction requested=false 2024-11-20T17:22:46,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2538): Flush status journal for ac48603af62441d2defd4d588b3226cb: 2024-11-20T17:22:46,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:46,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=117 2024-11-20T17:22:46,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=117 2024-11-20T17:22:46,995 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-11-20T17:22:46,995 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7230 sec 2024-11-20T17:22:46,997 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees in 1.7280 sec 2024-11-20T17:22:47,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:47,263 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ac48603af62441d2defd4d588b3226cb 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-20T17:22:47,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=A 2024-11-20T17:22:47,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:47,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=B 2024-11-20T17:22:47,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:47,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
ac48603af62441d2defd4d588b3226cb, store=C 2024-11-20T17:22:47,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:47,272 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:47,272 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:47,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123427271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:47,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123427268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:47,272 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:47,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123427271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:47,274 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120ee4dd3bd024c4747a142ce6a22128e01_ac48603af62441d2defd4d588b3226cb is 50, key is test_row_0/A:col10/1732123367262/Put/seqid=0 2024-11-20T17:22:47,276 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:47,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123427272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:47,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742216_1392 (size=14794) 2024-11-20T17:22:47,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-20T17:22:47,375 INFO [Thread-1528 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 116 completed 2024-11-20T17:22:47,376 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:47,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123427373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:47,376 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:22:47,376 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:47,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123427373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:47,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees 2024-11-20T17:22:47,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T17:22:47,377 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:22:47,378 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:22:47,378 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:22:47,379 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:47,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123427375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:47,381 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:47,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123427378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:47,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T17:22:47,529 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:47,530 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-20T17:22:47,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:47,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. as already flushing 2024-11-20T17:22:47,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:47,530 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
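The block of RegionTooBusyException warnings above is server-side back-pressure: HRegion.checkResources rejects Mutate calls once the region's memstore passes its blocking limit (512.0 K here), and callers are expected to retry after the in-flight flush frees memory. As a rough illustration only (this is not code from the test; the stock HBase client already retries such failures internally, and the class name, attempt count, and backoff constants below are invented for the sketch), a writer that backs off on these rejections could look like:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryingWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      putWithBackoff(table, put, 5);
    }
  }

  // Retry a put a bounded number of times, backing off whenever the region
  // reports it is over its memstore blocking limit.
  static void putWithBackoff(Table table, Put put, int maxAttempts) throws Exception {
    long backoffMs = 100;
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) {
        if (attempt >= maxAttempts) {
          throw e; // give up after maxAttempts rejections
        }
        Thread.sleep(backoffMs);
        backoffMs *= 2; // exponential backoff while the flush catches up
      }
    }
  }
}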
2024-11-20T17:22:47,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:47,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:47,580 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:47,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123427577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:47,580 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:47,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123427578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:47,583 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:47,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123427580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:47,584 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:47,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123427583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:47,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T17:22:47,679 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:22:47,682 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:47,683 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-20T17:22:47,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:47,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. as already flushing 2024-11-20T17:22:47,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:47,683 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:47,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:47,684 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120ee4dd3bd024c4747a142ce6a22128e01_ac48603af62441d2defd4d588b3226cb to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120ee4dd3bd024c4747a142ce6a22128e01_ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:47,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:47,684 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/beb80e5d24774926ab98379d30326955, store: [table=TestAcidGuarantees family=A region=ac48603af62441d2defd4d588b3226cb] 2024-11-20T17:22:47,685 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/beb80e5d24774926ab98379d30326955 is 175, key is test_row_0/A:col10/1732123367262/Put/seqid=0 2024-11-20T17:22:47,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742217_1393 (size=39749) 2024-11-20T17:22:47,835 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:47,835 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-20T17:22:47,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:47,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 
as already flushing 2024-11-20T17:22:47,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:47,836 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:47,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:47,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:47,884 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:47,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123427881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:47,885 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:47,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123427883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:47,889 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:47,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123427885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:47,889 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:47,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123427886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:47,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T17:22:47,988 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:47,988 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-20T17:22:47,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:47,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. as already flushing 2024-11-20T17:22:47,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:47,988 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
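The repeated "Over memstore limit=512.0 K" rejections come from the region's blocking threshold, which HBase derives from the configured per-region flush size and the block multiplier. A minimal sketch of the two relevant settings follows; the 128 KB flush size is an assumed test-sized value chosen so that flush size times multiplier matches the 512 K limit seen in this log, not a value read from the test's actual configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Per-region memstore flush size; a flush is requested once a region's
    // memstore crosses this threshold. 128 KB is an illustrative test-sized value.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);

    // Updates are blocked (RegionTooBusyException) once the memstore reaches
    // roughly flush.size * block.multiplier -- 128 KB * 4 = 512 KB here,
    // matching the "Over memstore limit=512.0 K" messages in this log.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    System.out.println("blocking limit ~= "
        + conf.getLong("hbase.hregion.memstore.flush.size", 0)
          * conf.getInt("hbase.hregion.memstore.block.multiplier", 0) + " bytes");
  }
}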
2024-11-20T17:22:47,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:47,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:48,090 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=235, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/beb80e5d24774926ab98379d30326955 2024-11-20T17:22:48,097 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/0e6d4738092b4bd0bba7bd87647e1496 is 50, key is test_row_0/B:col10/1732123367262/Put/seqid=0 2024-11-20T17:22:48,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742218_1394 (size=12151) 2024-11-20T17:22:48,140 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:48,141 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-20T17:22:48,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:48,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. as already flushing 2024-11-20T17:22:48,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:48,141 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
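The "Over memstore limit=512.0 K" figure is the region's blocking memstore size, which is the memstore flush size multiplied by the block multiplier; the small value suggests the test runs with a deliberately tiny flush size so that flushes and compactions happen constantly. The sketch below shows the two configuration keys involved; the concrete values are chosen only to reproduce the 512 K limit seen in the log and are an assumption, not the test's actual settings.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingConfig {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Flush a region's memstore once it reaches 128 KB (the default is 128 MB).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Block new writes once the memstore reaches flush.size * multiplier = 512 KB,
        // which is the "Over memstore limit=512.0 K" threshold reported above.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long limit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("blocking limit = " + limit + " bytes");
      }
    }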
2024-11-20T17:22:48,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:48,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:48,293 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:48,293 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-20T17:22:48,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:48,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. as already flushing 2024-11-20T17:22:48,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:48,294 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:48,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:48,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:48,394 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:48,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123428388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:48,394 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:48,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123428390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:48,398 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:48,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123428394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:48,398 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:48,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123428394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:48,446 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:48,446 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-20T17:22:48,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:48,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. as already flushing 2024-11-20T17:22:48,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:48,447 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
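Each "Executing remote procedure ... FlushRegionCallable, pid=119" / "Unable to complete flush" pair above is one dispatch attempt: the master re-sends the flush callable until the region server is no longer mid-flush (it finally goes through at 17:22:48,599 below). For reference, a flush like this can also be requested from client code through the Admin API; the sketch below is generic and is not how TestAcidGuarantees itself drives its flushes.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestFlush {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Asks HBase to flush the table's regions; depending on version this is driven either
          // by direct region-server RPCs or by a master-side flush procedure like the pid=119
          // dispatches logged above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }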
2024-11-20T17:22:48,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:48,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:22:48,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T17:22:48,502 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/0e6d4738092b4bd0bba7bd87647e1496 2024-11-20T17:22:48,509 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/706b982889034527a9a9443262bed801 is 50, key is test_row_0/C:col10/1732123367262/Put/seqid=0 2024-11-20T17:22:48,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742219_1395 (size=12151) 2024-11-20T17:22:48,512 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/706b982889034527a9a9443262bed801 2024-11-20T17:22:48,516 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/beb80e5d24774926ab98379d30326955 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/beb80e5d24774926ab98379d30326955 2024-11-20T17:22:48,519 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/beb80e5d24774926ab98379d30326955, entries=200, sequenceid=235, filesize=38.8 K 2024-11-20T17:22:48,520 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/0e6d4738092b4bd0bba7bd87647e1496 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/0e6d4738092b4bd0bba7bd87647e1496 2024-11-20T17:22:48,523 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/0e6d4738092b4bd0bba7bd87647e1496, entries=150, sequenceid=235, filesize=11.9 K 2024-11-20T17:22:48,524 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/706b982889034527a9a9443262bed801 as 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/706b982889034527a9a9443262bed801 2024-11-20T17:22:48,527 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/706b982889034527a9a9443262bed801, entries=150, sequenceid=235, filesize=11.9 K 2024-11-20T17:22:48,528 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for ac48603af62441d2defd4d588b3226cb in 1264ms, sequenceid=235, compaction requested=true 2024-11-20T17:22:48,528 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ac48603af62441d2defd4d588b3226cb: 2024-11-20T17:22:48,528 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ac48603af62441d2defd4d588b3226cb:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:22:48,528 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:48,528 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ac48603af62441d2defd4d588b3226cb:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:22:48,528 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:48,528 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ac48603af62441d2defd4d588b3226cb:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:22:48,528 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:22:48,528 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:22:48,528 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:22:48,530 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102369 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:22:48,530 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:22:48,530 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): ac48603af62441d2defd4d588b3226cb/A is initiating minor compaction (all files) 2024-11-20T17:22:48,530 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): ac48603af62441d2defd4d588b3226cb/B is initiating minor compaction (all files) 2024-11-20T17:22:48,530 INFO 
[RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ac48603af62441d2defd4d588b3226cb/A in TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:48,530 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ac48603af62441d2defd4d588b3226cb/B in TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:48,530 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/d72fc5190a144efca8239158596e55b1, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/4b8372eb614e4c95aea285e580191bf5, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/beb80e5d24774926ab98379d30326955] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp, totalSize=100.0 K 2024-11-20T17:22:48,530 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/5ef2a5c68c0a46b9a5a35944d59244bb, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/f0c426eb96044fc69746dbf21371e56d, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/0e6d4738092b4bd0bba7bd87647e1496] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp, totalSize=36.0 K 2024-11-20T17:22:48,530 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:48,530 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 
files: [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/d72fc5190a144efca8239158596e55b1, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/4b8372eb614e4c95aea285e580191bf5, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/beb80e5d24774926ab98379d30326955] 2024-11-20T17:22:48,530 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 5ef2a5c68c0a46b9a5a35944d59244bb, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732123363344 2024-11-20T17:22:48,531 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting d72fc5190a144efca8239158596e55b1, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732123363344 2024-11-20T17:22:48,531 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting f0c426eb96044fc69746dbf21371e56d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1732123363970 2024-11-20T17:22:48,531 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4b8372eb614e4c95aea285e580191bf5, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1732123363970 2024-11-20T17:22:48,531 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 0e6d4738092b4bd0bba7bd87647e1496, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1732123366128 2024-11-20T17:22:48,531 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting beb80e5d24774926ab98379d30326955, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1732123366128 2024-11-20T17:22:48,539 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=ac48603af62441d2defd4d588b3226cb] 2024-11-20T17:22:48,539 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ac48603af62441d2defd4d588b3226cb#B#compaction#334 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:22:48,540 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/ca0506920f4741668e7c05952c318629 is 50, key is test_row_0/B:col10/1732123367262/Put/seqid=0 2024-11-20T17:22:48,540 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112013673d041aaf453e96889353fc384b94_ac48603af62441d2defd4d588b3226cb store=[table=TestAcidGuarantees family=A region=ac48603af62441d2defd4d588b3226cb] 2024-11-20T17:22:48,542 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112013673d041aaf453e96889353fc384b94_ac48603af62441d2defd4d588b3226cb, store=[table=TestAcidGuarantees family=A region=ac48603af62441d2defd4d588b3226cb] 2024-11-20T17:22:48,542 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112013673d041aaf453e96889353fc384b94_ac48603af62441d2defd4d588b3226cb because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=ac48603af62441d2defd4d588b3226cb] 2024-11-20T17:22:48,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742220_1396 (size=12663) 2024-11-20T17:22:48,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742221_1397 (size=4469) 2024-11-20T17:22:48,598 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:48,599 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-20T17:22:48,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 
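The A family is evidently MOB-enabled: its flushes go through DefaultMobStoreFlusher, its compactions through DefaultMobStoreCompactor, and its files live under mobdir/. The "Aborting writer ... because there are no MOB cells" entry just means no value in this compaction exceeded the MOB threshold, so the temporary MOB file is discarded. A column family is declared MOB-enabled roughly as in the sketch below; the threshold value is an assumption for illustration.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                  .setMobEnabled(true)           // route large cells through the MOB store (mobdir/)
                  .setMobThreshold(100 * 1024L)  // cells above ~100 KB become MOB cells (assumed value)
                  .build())
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"))
              .build());
        }
      }
    }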
2024-11-20T17:22:48,599 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2837): Flushing ac48603af62441d2defd4d588b3226cb 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-20T17:22:48,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=A 2024-11-20T17:22:48,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:48,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=B 2024-11-20T17:22:48,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:48,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=C 2024-11-20T17:22:48,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:48,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d797ebd291644906883fe6f997a42f1b_ac48603af62441d2defd4d588b3226cb is 50, key is test_row_0/A:col10/1732123367270/Put/seqid=0 2024-11-20T17:22:48,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742222_1398 (size=12304) 2024-11-20T17:22:48,950 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ac48603af62441d2defd4d588b3226cb#A#compaction#335 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:22:48,951 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/4bd3e620fd2a46c1bd9a96b71196f5d0 is 175, key is test_row_0/A:col10/1732123367262/Put/seqid=0 2024-11-20T17:22:48,953 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/ca0506920f4741668e7c05952c318629 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/ca0506920f4741668e7c05952c318629 2024-11-20T17:22:48,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742223_1399 (size=31617) 2024-11-20T17:22:48,958 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ac48603af62441d2defd4d588b3226cb/B of ac48603af62441d2defd4d588b3226cb into ca0506920f4741668e7c05952c318629(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:22:48,958 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ac48603af62441d2defd4d588b3226cb: 2024-11-20T17:22:48,958 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb., storeName=ac48603af62441d2defd4d588b3226cb/B, priority=13, startTime=1732123368528; duration=0sec 2024-11-20T17:22:48,958 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:22:48,958 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ac48603af62441d2defd4d588b3226cb:B 2024-11-20T17:22:48,958 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:22:48,959 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:22:48,959 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): ac48603af62441d2defd4d588b3226cb/C is initiating minor compaction (all files) 2024-11-20T17:22:48,959 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ac48603af62441d2defd4d588b3226cb/C in TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 
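The flush left each of A, B and C with three store files, which reaches the minimum file count for minor compaction selection; ExploringCompactionPolicy then considers the eligible permutations (only one here) and picks all three files, so each store is rewritten into a single file. The trigger is governed by hbase.hstore.compaction.min (historically hbase.hstore.compactionThreshold, default 3), and a compaction can also be forced from client code; both are shown in the hedged sketch below.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CompactionKnobs {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Minor compaction selection starts once a store has at least this many files
        // (default 3, matching "Selecting compaction from 3 store files" in the log above).
        conf.setInt("hbase.hstore.compaction.min", 3);
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Explicitly request a major compaction of the table (runs asynchronously).
          admin.majorCompact(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }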
2024-11-20T17:22:48,959 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/c19c7519d98c4a26b804ac7a8b69f7a4, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/5f0ab29f95e740a9a3b3a0cdbeed5341, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/706b982889034527a9a9443262bed801] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp, totalSize=36.0 K 2024-11-20T17:22:48,959 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting c19c7519d98c4a26b804ac7a8b69f7a4, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732123363344 2024-11-20T17:22:48,960 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 5f0ab29f95e740a9a3b3a0cdbeed5341, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1732123363970 2024-11-20T17:22:48,960 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 706b982889034527a9a9443262bed801, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1732123366128 2024-11-20T17:22:48,965 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ac48603af62441d2defd4d588b3226cb#C#compaction#337 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:22:48,965 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/598badb8e00449808debf4a1a9ddb23c is 50, key is test_row_0/C:col10/1732123367262/Put/seqid=0 2024-11-20T17:22:48,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742224_1400 (size=12663) 2024-11-20T17:22:49,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:22:49,015 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d797ebd291644906883fe6f997a42f1b_ac48603af62441d2defd4d588b3226cb to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d797ebd291644906883fe6f997a42f1b_ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:49,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/a84df78f016f4c3e954731392fb9e3ce, store: [table=TestAcidGuarantees family=A region=ac48603af62441d2defd4d588b3226cb] 2024-11-20T17:22:49,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/a84df78f016f4c3e954731392fb9e3ce is 175, key is test_row_0/A:col10/1732123367270/Put/seqid=0 2024-11-20T17:22:49,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742225_1401 (size=31105) 2024-11-20T17:22:49,020 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=244, memsize=13.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/a84df78f016f4c3e954731392fb9e3ce 2024-11-20T17:22:49,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/cdf11aed3af8479eb018d6dc6d96aa68 is 50, key is test_row_0/B:col10/1732123367270/Put/seqid=0 2024-11-20T17:22:49,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:40823 is added to blk_1073742226_1402 (size=12151) 2024-11-20T17:22:49,365 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/4bd3e620fd2a46c1bd9a96b71196f5d0 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/4bd3e620fd2a46c1bd9a96b71196f5d0 2024-11-20T17:22:49,370 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ac48603af62441d2defd4d588b3226cb/A of ac48603af62441d2defd4d588b3226cb into 4bd3e620fd2a46c1bd9a96b71196f5d0(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:22:49,370 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ac48603af62441d2defd4d588b3226cb: 2024-11-20T17:22:49,370 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb., storeName=ac48603af62441d2defd4d588b3226cb/A, priority=13, startTime=1732123368528; duration=0sec 2024-11-20T17:22:49,370 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:49,370 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ac48603af62441d2defd4d588b3226cb:A 2024-11-20T17:22:49,379 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/598badb8e00449808debf4a1a9ddb23c as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/598badb8e00449808debf4a1a9ddb23c 2024-11-20T17:22:49,383 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ac48603af62441d2defd4d588b3226cb/C of ac48603af62441d2defd4d588b3226cb into 598badb8e00449808debf4a1a9ddb23c(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
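At this point one full cycle has completed: the flush at sequenceid=235 was committed, all three stores were compacted back down to a single file each, and the .tmp files were moved into place; the very next writes push the memstore over its limit again and the pattern repeats below. If this needs to be observed from client code, the table's compaction state can be polled as in the minimal sketch that follows (not part of the test itself).

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class WaitForCompaction {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          // Poll until no store of the table is under compaction.
          while (admin.getCompactionState(table) != CompactionState.NONE) {
            Thread.sleep(500L);
          }
          System.out.println("compactions finished for " + table);
        }
      }
    }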
2024-11-20T17:22:49,383 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ac48603af62441d2defd4d588b3226cb: 2024-11-20T17:22:49,383 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb., storeName=ac48603af62441d2defd4d588b3226cb/C, priority=13, startTime=1732123368528; duration=0sec 2024-11-20T17:22:49,383 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:49,383 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ac48603af62441d2defd4d588b3226cb:C 2024-11-20T17:22:49,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:49,403 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. as already flushing 2024-11-20T17:22:49,431 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:49,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123429427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:49,433 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=244 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/cdf11aed3af8479eb018d6dc6d96aa68 2024-11-20T17:22:49,435 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:49,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123429429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:49,435 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:49,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123429430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:49,437 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:49,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123429431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:49,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/5d6e34e03d3d4a4ab6d37b811a3b9b18 is 50, key is test_row_0/C:col10/1732123367270/Put/seqid=0 2024-11-20T17:22:49,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742227_1403 (size=12151) 2024-11-20T17:22:49,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T17:22:49,536 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:49,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123429532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:49,538 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:49,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123429536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:49,541 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:49,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123429537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:49,541 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:49,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123429538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:49,739 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:49,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123429739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:49,742 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:49,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123429740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:49,744 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:49,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123429742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:49,744 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:49,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123429743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:49,844 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=244 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/5d6e34e03d3d4a4ab6d37b811a3b9b18 2024-11-20T17:22:49,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/a84df78f016f4c3e954731392fb9e3ce as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/a84df78f016f4c3e954731392fb9e3ce 2024-11-20T17:22:49,851 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/a84df78f016f4c3e954731392fb9e3ce, entries=150, sequenceid=244, filesize=30.4 K 2024-11-20T17:22:49,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/cdf11aed3af8479eb018d6dc6d96aa68 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/cdf11aed3af8479eb018d6dc6d96aa68 2024-11-20T17:22:49,855 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/cdf11aed3af8479eb018d6dc6d96aa68, entries=150, sequenceid=244, filesize=11.9 K 2024-11-20T17:22:49,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/5d6e34e03d3d4a4ab6d37b811a3b9b18 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/5d6e34e03d3d4a4ab6d37b811a3b9b18 2024-11-20T17:22:49,859 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/5d6e34e03d3d4a4ab6d37b811a3b9b18, entries=150, sequenceid=244, filesize=11.9 K 2024-11-20T17:22:49,859 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=167.72 KB/171750 for ac48603af62441d2defd4d588b3226cb in 1260ms, sequenceid=244, compaction requested=false 2024-11-20T17:22:49,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2538): Flush status journal for ac48603af62441d2defd4d588b3226cb: 2024-11-20T17:22:49,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 
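Aside (illustrative, not output from this run): the FlushRegionProcedure/FlushTableProcedure completions that follow appear to correspond to a client-requested flush of TestAcidGuarantees. A minimal sketch of issuing such a flush through the Admin API is shown below; the connection setup is assumed and this is not the test's own code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; in the log that follows this
      // shows up as FlushTableProcedure pid=118 with FlushRegionProcedure subprocedure
      // pid=119 finishing with state=SUCCESS.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}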
2024-11-20T17:22:49,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=119 2024-11-20T17:22:49,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=119 2024-11-20T17:22:49,862 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-11-20T17:22:49,862 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4830 sec 2024-11-20T17:22:49,863 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees in 2.4860 sec 2024-11-20T17:22:50,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:50,044 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ac48603af62441d2defd4d588b3226cb 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-20T17:22:50,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=A 2024-11-20T17:22:50,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:50,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=B 2024-11-20T17:22:50,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:50,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=C 2024-11-20T17:22:50,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:50,051 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:50,051 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120293ac6c4175a4414acd817ea8d0b05ec_ac48603af62441d2defd4d588b3226cb is 50, key is test_row_0/A:col10/1732123369420/Put/seqid=0 2024-11-20T17:22:50,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123430048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:50,052 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:50,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123430049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:50,052 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:50,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123430050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:50,054 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:50,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123430051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:50,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742228_1404 (size=14994) 2024-11-20T17:22:50,153 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:50,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123430152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:50,155 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:50,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123430153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:50,156 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:50,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123430153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:50,157 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:50,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123430155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:50,359 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:50,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123430355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:50,359 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:50,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123430356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:50,359 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:50,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123430357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:50,363 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:50,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123430359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:50,458 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:22:50,461 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120293ac6c4175a4414acd817ea8d0b05ec_ac48603af62441d2defd4d588b3226cb to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120293ac6c4175a4414acd817ea8d0b05ec_ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:50,462 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/d184f931bf034259a1f0f5591022f00a, store: [table=TestAcidGuarantees family=A region=ac48603af62441d2defd4d588b3226cb] 2024-11-20T17:22:50,463 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/d184f931bf034259a1f0f5591022f00a is 175, key is test_row_0/A:col10/1732123369420/Put/seqid=0 2024-11-20T17:22:50,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742229_1405 (size=39949) 2024-11-20T17:22:50,627 DEBUG [Thread-1529 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f49665c to 127.0.0.1:55266 2024-11-20T17:22:50,627 DEBUG [Thread-1529 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:22:50,629 DEBUG [Thread-1533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x75e4d3d0 to 127.0.0.1:55266 2024-11-20T17:22:50,629 DEBUG [Thread-1533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:22:50,632 DEBUG [Thread-1531 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x683f8469 to 127.0.0.1:55266 2024-11-20T17:22:50,632 DEBUG [Thread-1531 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:22:50,633 DEBUG [Thread-1537 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x68035c67 to 127.0.0.1:55266 2024-11-20T17:22:50,633 DEBUG [Thread-1535 {}] 
zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2b308f62 to 127.0.0.1:55266 2024-11-20T17:22:50,633 DEBUG [Thread-1537 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:22:50,633 DEBUG [Thread-1535 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:22:50,660 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:50,660 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:50,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123430660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:50,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123430660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:50,660 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:50,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123430660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:50,664 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:50,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123430664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:50,867 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=276, memsize=58.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/d184f931bf034259a1f0f5591022f00a 2024-11-20T17:22:50,873 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/ec0caf38515f4192988c8ee6769b016d is 50, key is test_row_0/B:col10/1732123369420/Put/seqid=0 2024-11-20T17:22:50,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742230_1406 (size=12301) 2024-11-20T17:22:51,162 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:51,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32930 deadline: 1732123431162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:51,163 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:51,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32994 deadline: 1732123431163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:51,165 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:51,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32972 deadline: 1732123431165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:51,168 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:51,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33018 deadline: 1732123431168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:51,277 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/ec0caf38515f4192988c8ee6769b016d 2024-11-20T17:22:51,284 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/8eed907ccc5a4cef96c44fd38e20812d is 50, key is test_row_0/C:col10/1732123369420/Put/seqid=0 2024-11-20T17:22:51,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742231_1407 (size=12301) 2024-11-20T17:22:51,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T17:22:51,482 INFO [Thread-1528 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 118 completed 2024-11-20T17:22:51,688 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/8eed907ccc5a4cef96c44fd38e20812d 2024-11-20T17:22:51,692 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/d184f931bf034259a1f0f5591022f00a as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/d184f931bf034259a1f0f5591022f00a 2024-11-20T17:22:51,695 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/d184f931bf034259a1f0f5591022f00a, entries=200, sequenceid=276, filesize=39.0 K 2024-11-20T17:22:51,695 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/ec0caf38515f4192988c8ee6769b016d as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/ec0caf38515f4192988c8ee6769b016d 2024-11-20T17:22:51,698 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/ec0caf38515f4192988c8ee6769b016d, entries=150, sequenceid=276, filesize=12.0 K 2024-11-20T17:22:51,699 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/8eed907ccc5a4cef96c44fd38e20812d as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/8eed907ccc5a4cef96c44fd38e20812d 2024-11-20T17:22:51,701 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/8eed907ccc5a4cef96c44fd38e20812d, entries=150, sequenceid=276, filesize=12.0 K 2024-11-20T17:22:51,702 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for ac48603af62441d2defd4d588b3226cb in 1658ms, sequenceid=276, compaction requested=true 2024-11-20T17:22:51,702 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ac48603af62441d2defd4d588b3226cb: 2024-11-20T17:22:51,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ac48603af62441d2defd4d588b3226cb:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:22:51,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:51,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ac48603af62441d2defd4d588b3226cb:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:22:51,702 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:22:51,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:51,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ac48603af62441d2defd4d588b3226cb:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:22:51,702 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:22:51,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:22:51,703 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102671 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:22:51,703 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37115 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:22:51,703 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): ac48603af62441d2defd4d588b3226cb/B is initiating minor compaction (all files) 2024-11-20T17:22:51,703 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): ac48603af62441d2defd4d588b3226cb/A is initiating minor compaction (all files) 2024-11-20T17:22:51,703 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ac48603af62441d2defd4d588b3226cb/B in TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:51,703 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ac48603af62441d2defd4d588b3226cb/A in TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:51,703 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/ca0506920f4741668e7c05952c318629, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/cdf11aed3af8479eb018d6dc6d96aa68, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/ec0caf38515f4192988c8ee6769b016d] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp, totalSize=36.2 K 2024-11-20T17:22:51,703 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/4bd3e620fd2a46c1bd9a96b71196f5d0, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/a84df78f016f4c3e954731392fb9e3ce, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/d184f931bf034259a1f0f5591022f00a] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp, totalSize=100.3 K 2024-11-20T17:22:51,703 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 
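Note: the selection entries above show ExploringCompactionPolicy picking all three eligible store files per family for a minor compaction immediately after the flush. For reference, the same work can also be requested explicitly through the public Admin API; the sketch below is illustrative only (it assumes a reachable cluster configured via hbase-site.xml and reuses the table and family names from this log), not code from the test itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompaction {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      admin.compact(table, Bytes.toBytes("A"));          // asynchronously queue a compaction of family A
      admin.majorCompact(table);                         // or request a major compaction of the whole table
    }
  }
}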
2024-11-20T17:22:51,703 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. files: [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/4bd3e620fd2a46c1bd9a96b71196f5d0, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/a84df78f016f4c3e954731392fb9e3ce, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/d184f931bf034259a1f0f5591022f00a] 2024-11-20T17:22:51,704 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting ca0506920f4741668e7c05952c318629, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1732123366128 2024-11-20T17:22:51,704 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4bd3e620fd2a46c1bd9a96b71196f5d0, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1732123366128 2024-11-20T17:22:51,704 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting cdf11aed3af8479eb018d6dc6d96aa68, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1732123367266 2024-11-20T17:22:51,704 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting a84df78f016f4c3e954731392fb9e3ce, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1732123367266 2024-11-20T17:22:51,704 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting ec0caf38515f4192988c8ee6769b016d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732123369420 2024-11-20T17:22:51,704 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting d184f931bf034259a1f0f5591022f00a, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732123369420 2024-11-20T17:22:51,710 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ac48603af62441d2defd4d588b3226cb#B#compaction#343 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:22:51,711 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/2a95be18b07241f6a069828d71e69055 is 50, key is test_row_0/B:col10/1732123369420/Put/seqid=0 2024-11-20T17:22:51,713 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=ac48603af62441d2defd4d588b3226cb] 2024-11-20T17:22:51,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742232_1408 (size=12915) 2024-11-20T17:22:51,715 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112013eab0fffc3c45d0a9dbf49f83b95ac6_ac48603af62441d2defd4d588b3226cb store=[table=TestAcidGuarantees family=A region=ac48603af62441d2defd4d588b3226cb] 2024-11-20T17:22:51,743 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112013eab0fffc3c45d0a9dbf49f83b95ac6_ac48603af62441d2defd4d588b3226cb, store=[table=TestAcidGuarantees family=A region=ac48603af62441d2defd4d588b3226cb] 2024-11-20T17:22:51,744 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112013eab0fffc3c45d0a9dbf49f83b95ac6_ac48603af62441d2defd4d588b3226cb because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=ac48603af62441d2defd4d588b3226cb] 2024-11-20T17:22:51,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742233_1409 (size=4469) 2024-11-20T17:22:52,119 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/2a95be18b07241f6a069828d71e69055 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/2a95be18b07241f6a069828d71e69055 2024-11-20T17:22:52,123 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ac48603af62441d2defd4d588b3226cb/B of ac48603af62441d2defd4d588b3226cb into 2a95be18b07241f6a069828d71e69055(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
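Note: the HMobStore / DefaultMobStoreFlusher / DefaultMobStoreCompactor entries in this log appear because column family A of the test table is MOB-enabled; in the compaction just above, every cell was evidently below the MOB threshold, so the freshly created MOB writer was aborted with mobCells=0. A minimal sketch of declaring such a family follows; the 100 KB threshold is an illustrative assumption, not the test's setting.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestAcidGuarantees"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("A"))
              .setMobEnabled(true)            // large values are written to MOB files under /mobdir
              .setMobThreshold(100 * 1024L)   // assumed threshold; smaller cells stay in regular HFiles
              .build())
          .build());
    }
  }
}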
2024-11-20T17:22:52,123 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ac48603af62441d2defd4d588b3226cb: 2024-11-20T17:22:52,123 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb., storeName=ac48603af62441d2defd4d588b3226cb/B, priority=13, startTime=1732123371702; duration=0sec 2024-11-20T17:22:52,123 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:22:52,123 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ac48603af62441d2defd4d588b3226cb:B 2024-11-20T17:22:52,123 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:22:52,124 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37115 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:22:52,124 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): ac48603af62441d2defd4d588b3226cb/C is initiating minor compaction (all files) 2024-11-20T17:22:52,124 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ac48603af62441d2defd4d588b3226cb/C in TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:52,124 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/598badb8e00449808debf4a1a9ddb23c, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/5d6e34e03d3d4a4ab6d37b811a3b9b18, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/8eed907ccc5a4cef96c44fd38e20812d] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp, totalSize=36.2 K 2024-11-20T17:22:52,124 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 598badb8e00449808debf4a1a9ddb23c, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1732123366128 2024-11-20T17:22:52,124 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 5d6e34e03d3d4a4ab6d37b811a3b9b18, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1732123367266 2024-11-20T17:22:52,125 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 8eed907ccc5a4cef96c44fd38e20812d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732123369420 2024-11-20T17:22:52,131 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
ac48603af62441d2defd4d588b3226cb#C#compaction#345 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:22:52,131 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/99ce8b22640944f693d550ad30251bc0 is 50, key is test_row_0/C:col10/1732123369420/Put/seqid=0 2024-11-20T17:22:52,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742234_1410 (size=12915) 2024-11-20T17:22:52,149 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ac48603af62441d2defd4d588b3226cb#A#compaction#344 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:22:52,149 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/699ce21539c64b9fb33c5e6286e1cae1 is 175, key is test_row_0/A:col10/1732123369420/Put/seqid=0 2024-11-20T17:22:52,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742235_1411 (size=31869) 2024-11-20T17:22:52,166 DEBUG [Thread-1520 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7ec15031 to 127.0.0.1:55266 2024-11-20T17:22:52,166 DEBUG [Thread-1520 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:22:52,168 DEBUG [Thread-1518 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7b6cf8cb to 127.0.0.1:55266 2024-11-20T17:22:52,168 DEBUG [Thread-1518 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:22:52,168 DEBUG [Thread-1526 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f94d721 to 127.0.0.1:55266 2024-11-20T17:22:52,168 DEBUG [Thread-1526 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:22:52,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:52,177 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ac48603af62441d2defd4d588b3226cb 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T17:22:52,177 DEBUG [Thread-1522 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3dd5b441 to 127.0.0.1:55266 2024-11-20T17:22:52,177 DEBUG [Thread-1522 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:22:52,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=A 2024-11-20T17:22:52,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:52,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=B 2024-11-20T17:22:52,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:52,177 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=C 2024-11-20T17:22:52,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:52,183 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d5a812b5669b44c0acc3f00099a60e4c_ac48603af62441d2defd4d588b3226cb is 50, key is test_row_0/A:col10/1732123370049/Put/seqid=0 2024-11-20T17:22:52,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742236_1412 (size=12454) 2024-11-20T17:22:52,538 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/99ce8b22640944f693d550ad30251bc0 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/99ce8b22640944f693d550ad30251bc0 2024-11-20T17:22:52,542 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ac48603af62441d2defd4d588b3226cb/C of ac48603af62441d2defd4d588b3226cb into 99ce8b22640944f693d550ad30251bc0(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:22:52,542 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ac48603af62441d2defd4d588b3226cb: 2024-11-20T17:22:52,542 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb., storeName=ac48603af62441d2defd4d588b3226cb/C, priority=13, startTime=1732123371702; duration=0sec 2024-11-20T17:22:52,542 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:52,542 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ac48603af62441d2defd4d588b3226cb:C 2024-11-20T17:22:52,556 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/699ce21539c64b9fb33c5e6286e1cae1 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/699ce21539c64b9fb33c5e6286e1cae1 2024-11-20T17:22:52,560 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ac48603af62441d2defd4d588b3226cb/A of ac48603af62441d2defd4d588b3226cb into 699ce21539c64b9fb33c5e6286e1cae1(size=31.1 K), total size for store is 31.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
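Note: the repeated RegionTooBusyException "Over memstore limit=512.0 K" warnings earlier in this log are server-side write backpressure: mutations are rejected while the region's memstore exceeds its blocking size, and the flushes recorded here are what drain it. The HBase client retries these failures on its own; the sketch below illustrates tuning that retry behaviour and handling the case where retries run out. The configuration values and column layout are assumptions for illustration, not the test's settings.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackpressure {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 15);   // how many times a rejected mutation is retried
    conf.setLong("hbase.client.pause", 100);          // base backoff between retries, in milliseconds
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        table.put(put);
      } catch (IOException e) {
        // Once retries are exhausted the failure surfaces as an IOException whose
        // cause chain includes RegionTooBusyException, as in the traces above.
        System.err.println("write gave up after retries: " + e.getMessage());
      }
    }
  }
}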
2024-11-20T17:22:52,560 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ac48603af62441d2defd4d588b3226cb: 2024-11-20T17:22:52,560 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb., storeName=ac48603af62441d2defd4d588b3226cb/A, priority=13, startTime=1732123371702; duration=0sec 2024-11-20T17:22:52,560 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:22:52,560 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ac48603af62441d2defd4d588b3226cb:A 2024-11-20T17:22:52,586 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:22:52,589 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d5a812b5669b44c0acc3f00099a60e4c_ac48603af62441d2defd4d588b3226cb to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d5a812b5669b44c0acc3f00099a60e4c_ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:52,589 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/04666f71fa1546878a900c7bd6f2c0c5, store: [table=TestAcidGuarantees family=A region=ac48603af62441d2defd4d588b3226cb] 2024-11-20T17:22:52,590 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/04666f71fa1546878a900c7bd6f2c0c5 is 175, key is test_row_0/A:col10/1732123370049/Put/seqid=0 2024-11-20T17:22:52,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742237_1413 (size=31255) 2024-11-20T17:22:52,993 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=288, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/04666f71fa1546878a900c7bd6f2c0c5 2024-11-20T17:22:53,000 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/29eb530d056242b8a8de64609b49a9c4 is 50, key is test_row_0/B:col10/1732123370049/Put/seqid=0 2024-11-20T17:22:53,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742238_1414 (size=12301) 2024-11-20T17:22:53,403 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/29eb530d056242b8a8de64609b49a9c4 2024-11-20T17:22:53,409 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/9a892ae8eedf4032a31250acfcb51432 is 50, key is test_row_0/C:col10/1732123370049/Put/seqid=0 2024-11-20T17:22:53,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742239_1415 (size=12301) 2024-11-20T17:22:53,813 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/9a892ae8eedf4032a31250acfcb51432 2024-11-20T17:22:53,817 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/04666f71fa1546878a900c7bd6f2c0c5 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/04666f71fa1546878a900c7bd6f2c0c5 2024-11-20T17:22:53,820 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/04666f71fa1546878a900c7bd6f2c0c5, entries=150, sequenceid=288, filesize=30.5 K 2024-11-20T17:22:53,820 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/29eb530d056242b8a8de64609b49a9c4 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/29eb530d056242b8a8de64609b49a9c4 2024-11-20T17:22:53,823 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/29eb530d056242b8a8de64609b49a9c4, entries=150, sequenceid=288, filesize=12.0 K 2024-11-20T17:22:53,824 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/9a892ae8eedf4032a31250acfcb51432 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/9a892ae8eedf4032a31250acfcb51432 2024-11-20T17:22:53,827 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/9a892ae8eedf4032a31250acfcb51432, entries=150, sequenceid=288, filesize=12.0 K 2024-11-20T17:22:53,827 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=0 B/0 for ac48603af62441d2defd4d588b3226cb in 1650ms, sequenceid=288, compaction requested=false 2024-11-20T17:22:53,828 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ac48603af62441d2defd4d588b3226cb: 2024-11-20T17:22:55,506 DEBUG [Thread-1524 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c336ea4 to 127.0.0.1:55266 2024-11-20T17:22:55,506 DEBUG [Thread-1524 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:22:55,506 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-20T17:22:55,506 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 49 2024-11-20T17:22:55,506 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 47 2024-11-20T17:22:55,506 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 54 2024-11-20T17:22:55,506 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 21 2024-11-20T17:22:55,507 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 56 2024-11-20T17:22:55,507 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T17:22:55,507 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T17:22:55,507 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2724 2024-11-20T17:22:55,507 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8172 rows 2024-11-20T17:22:55,507 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2701 2024-11-20T17:22:55,507 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8103 rows 2024-11-20T17:22:55,507 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2730 2024-11-20T17:22:55,507 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8190 rows 2024-11-20T17:22:55,507 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2717 2024-11-20T17:22:55,507 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8151 rows 2024-11-20T17:22:55,507 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2698 2024-11-20T17:22:55,507 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8094 rows 2024-11-20T17:22:55,507 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T17:22:55,507 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1b82ba2a to 127.0.0.1:55266 2024-11-20T17:22:55,507 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:22:55,508 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-20T17:22:55,509 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T17:22:55,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure 
table=TestAcidGuarantees 2024-11-20T17:22:55,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T17:22:55,513 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123375512"}]},"ts":"1732123375512"} 2024-11-20T17:22:55,514 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T17:22:55,517 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T17:22:55,517 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T17:22:55,518 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=122, ppid=121, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ac48603af62441d2defd4d588b3226cb, UNASSIGN}] 2024-11-20T17:22:55,518 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=122, ppid=121, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ac48603af62441d2defd4d588b3226cb, UNASSIGN 2024-11-20T17:22:55,519 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=122 updating hbase:meta row=ac48603af62441d2defd4d588b3226cb, regionState=CLOSING, regionLocation=d514dc944523,40121,1732123262111 2024-11-20T17:22:55,520 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T17:22:55,520 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; CloseRegionProcedure ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111}] 2024-11-20T17:22:55,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T17:22:55,671 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:55,671 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] handler.UnassignRegionHandler(124): Close ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:55,671 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T17:22:55,671 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1681): Closing ac48603af62441d2defd4d588b3226cb, disabling compactions & flushes 2024-11-20T17:22:55,671 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:55,672 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 
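Note: the DisableTableProcedure -> CloseTableRegionsProcedure -> TransitRegionStateProcedure -> CloseRegionProcedure chain above is the master-side work triggered when the client disables TestAcidGuarantees at the end of the run. A minimal sketch of that client call follows; the table deletion is an assumed, typical cleanup step and is not shown in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);   // blocks until the master's DisableTableProcedure completes
      }
      admin.deleteTable(table);      // a table must be disabled before it can be deleted
    }
  }
}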
2024-11-20T17:22:55,672 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. after waiting 0 ms 2024-11-20T17:22:55,672 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 2024-11-20T17:22:55,672 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(2837): Flushing ac48603af62441d2defd4d588b3226cb 3/3 column families, dataSize=6.71 KB heapSize=18.33 KB 2024-11-20T17:22:55,672 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=A 2024-11-20T17:22:55,672 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:55,672 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=B 2024-11-20T17:22:55,672 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:55,672 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ac48603af62441d2defd4d588b3226cb, store=C 2024-11-20T17:22:55,672 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:55,677 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120dfc5729ffe4446739434ebfbf12cec85_ac48603af62441d2defd4d588b3226cb is 50, key is test_row_2/A:col10/1732123375505/Put/seqid=0 2024-11-20T17:22:55,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742240_1416 (size=7374) 2024-11-20T17:22:55,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T17:22:56,081 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:22:56,084 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120dfc5729ffe4446739434ebfbf12cec85_ac48603af62441d2defd4d588b3226cb to 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120dfc5729ffe4446739434ebfbf12cec85_ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:56,085 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/2c521f80de314bf6857e96a86cf470aa, store: [table=TestAcidGuarantees family=A region=ac48603af62441d2defd4d588b3226cb] 2024-11-20T17:22:56,085 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/2c521f80de314bf6857e96a86cf470aa is 175, key is test_row_2/A:col10/1732123375505/Put/seqid=0 2024-11-20T17:22:56,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742241_1417 (size=13865) 2024-11-20T17:22:56,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T17:22:56,489 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=294, memsize=2.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/2c521f80de314bf6857e96a86cf470aa 2024-11-20T17:22:56,495 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/672b078888214b1abc681fc13efc2ad4 is 50, key is test_row_2/B:col10/1732123375505/Put/seqid=0 2024-11-20T17:22:56,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742242_1418 (size=7415) 2024-11-20T17:22:56,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T17:22:56,899 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/672b078888214b1abc681fc13efc2ad4 2024-11-20T17:22:56,904 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/d78500ccff4b47e9bdeede00af1a2692 is 50, key is test_row_2/C:col10/1732123375505/Put/seqid=0 2024-11-20T17:22:56,907 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742243_1419 (size=7415) 2024-11-20T17:22:57,307 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/d78500ccff4b47e9bdeede00af1a2692 2024-11-20T17:22:57,311 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/A/2c521f80de314bf6857e96a86cf470aa as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/2c521f80de314bf6857e96a86cf470aa 2024-11-20T17:22:57,314 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/2c521f80de314bf6857e96a86cf470aa, entries=50, sequenceid=294, filesize=13.5 K 2024-11-20T17:22:57,314 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/B/672b078888214b1abc681fc13efc2ad4 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/672b078888214b1abc681fc13efc2ad4 2024-11-20T17:22:57,317 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/672b078888214b1abc681fc13efc2ad4, entries=50, sequenceid=294, filesize=7.2 K 2024-11-20T17:22:57,317 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/.tmp/C/d78500ccff4b47e9bdeede00af1a2692 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/d78500ccff4b47e9bdeede00af1a2692 2024-11-20T17:22:57,320 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/d78500ccff4b47e9bdeede00af1a2692, entries=50, sequenceid=294, filesize=7.2 K 2024-11-20T17:22:57,321 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(3040): Finished flush of dataSize ~6.71 KB/6870, heapSize ~18.28 
KB/18720, currentSize=0 B/0 for ac48603af62441d2defd4d588b3226cb in 1649ms, sequenceid=294, compaction requested=true 2024-11-20T17:22:57,321 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/82abe5dfe8724ea08dd007a03a595c12, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/e6b04b2379ac48f19a6ec48752f300a4, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/bc4c58509ae5415cbdba7da84831bb79, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/857a7db8647245ab8f1c45333baaf798, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/85b71758c10a4c22b263f14afebc403e, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/6011360ae98041d1a1c962413ddddcb8, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/6f692dd884c84735ac2f673805b2644f, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/733c30328fb04be18afa2971cba6669c, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/5af52cbab59948a4ae7b757a84449fcd, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/4c1be3bf4ebc4311be0f23ac8b9f6f9b, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/30c6b37a9c43471fb81af1a6964e2e54, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/e6713f07724a4ec5beaf50dbaba89cd5, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/d72fc5190a144efca8239158596e55b1, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/4b8372eb614e4c95aea285e580191bf5, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/beb80e5d24774926ab98379d30326955, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/4bd3e620fd2a46c1bd9a96b71196f5d0, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/a84df78f016f4c3e954731392fb9e3ce, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/d184f931bf034259a1f0f5591022f00a] to archive 2024-11-20T17:22:57,322 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T17:22:57,323 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/82abe5dfe8724ea08dd007a03a595c12 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/82abe5dfe8724ea08dd007a03a595c12 2024-11-20T17:22:57,324 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/e6b04b2379ac48f19a6ec48752f300a4 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/e6b04b2379ac48f19a6ec48752f300a4 2024-11-20T17:22:57,325 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/bc4c58509ae5415cbdba7da84831bb79 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/bc4c58509ae5415cbdba7da84831bb79 2024-11-20T17:22:57,326 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/857a7db8647245ab8f1c45333baaf798 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/857a7db8647245ab8f1c45333baaf798 2024-11-20T17:22:57,327 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/85b71758c10a4c22b263f14afebc403e to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/85b71758c10a4c22b263f14afebc403e 2024-11-20T17:22:57,328 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/6011360ae98041d1a1c962413ddddcb8 to 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/6011360ae98041d1a1c962413ddddcb8 2024-11-20T17:22:57,329 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/6f692dd884c84735ac2f673805b2644f to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/6f692dd884c84735ac2f673805b2644f 2024-11-20T17:22:57,329 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/733c30328fb04be18afa2971cba6669c to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/733c30328fb04be18afa2971cba6669c 2024-11-20T17:22:57,330 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/5af52cbab59948a4ae7b757a84449fcd to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/5af52cbab59948a4ae7b757a84449fcd 2024-11-20T17:22:57,331 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/4c1be3bf4ebc4311be0f23ac8b9f6f9b to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/4c1be3bf4ebc4311be0f23ac8b9f6f9b 2024-11-20T17:22:57,332 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/30c6b37a9c43471fb81af1a6964e2e54 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/30c6b37a9c43471fb81af1a6964e2e54 2024-11-20T17:22:57,333 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/e6713f07724a4ec5beaf50dbaba89cd5 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/e6713f07724a4ec5beaf50dbaba89cd5 2024-11-20T17:22:57,333 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/d72fc5190a144efca8239158596e55b1 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/d72fc5190a144efca8239158596e55b1 2024-11-20T17:22:57,334 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/4b8372eb614e4c95aea285e580191bf5 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/4b8372eb614e4c95aea285e580191bf5 2024-11-20T17:22:57,335 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/beb80e5d24774926ab98379d30326955 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/beb80e5d24774926ab98379d30326955 2024-11-20T17:22:57,336 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/4bd3e620fd2a46c1bd9a96b71196f5d0 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/4bd3e620fd2a46c1bd9a96b71196f5d0 2024-11-20T17:22:57,336 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/a84df78f016f4c3e954731392fb9e3ce to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/a84df78f016f4c3e954731392fb9e3ce 2024-11-20T17:22:57,337 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/d184f931bf034259a1f0f5591022f00a to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/d184f931bf034259a1f0f5591022f00a 2024-11-20T17:22:57,338 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/15df3c108658418797aaddb8026a46ee, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/695f9cdee75e4150b7e29118b2e1f93d, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/d6e75758adcc44fd99e613437b9a0899, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/be3a593f4f514495a57fec3e86b00cad, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/18060ba758af46689b927b46a865a794, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/e3c0b45fc9af43c9b2c685bf23377bd2, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/22fed1946dd64c6aa2d2e2d723b055ae, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/f1b2622e8ba141dda6a7ee644fc58644, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/78871194c43748a2a5c4369a8bb880ed, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/0bab8670a844488691434327f8801431, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/88c0652c19bc4914abffbca9ff19201d, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/5ef2a5c68c0a46b9a5a35944d59244bb, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/c3e61e4d0f8b432496017273591a2880, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/f0c426eb96044fc69746dbf21371e56d, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/ca0506920f4741668e7c05952c318629, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/0e6d4738092b4bd0bba7bd87647e1496, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/cdf11aed3af8479eb018d6dc6d96aa68, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/ec0caf38515f4192988c8ee6769b016d] to archive 2024-11-20T17:22:57,339 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T17:22:57,340 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/15df3c108658418797aaddb8026a46ee to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/15df3c108658418797aaddb8026a46ee 2024-11-20T17:22:57,341 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/695f9cdee75e4150b7e29118b2e1f93d to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/695f9cdee75e4150b7e29118b2e1f93d 2024-11-20T17:22:57,342 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/d6e75758adcc44fd99e613437b9a0899 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/d6e75758adcc44fd99e613437b9a0899 2024-11-20T17:22:57,342 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/be3a593f4f514495a57fec3e86b00cad to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/be3a593f4f514495a57fec3e86b00cad 2024-11-20T17:22:57,343 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/18060ba758af46689b927b46a865a794 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/18060ba758af46689b927b46a865a794 2024-11-20T17:22:57,344 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/e3c0b45fc9af43c9b2c685bf23377bd2 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/e3c0b45fc9af43c9b2c685bf23377bd2 2024-11-20T17:22:57,345 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/22fed1946dd64c6aa2d2e2d723b055ae to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/22fed1946dd64c6aa2d2e2d723b055ae 2024-11-20T17:22:57,346 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/f1b2622e8ba141dda6a7ee644fc58644 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/f1b2622e8ba141dda6a7ee644fc58644 2024-11-20T17:22:57,347 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/78871194c43748a2a5c4369a8bb880ed to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/78871194c43748a2a5c4369a8bb880ed 2024-11-20T17:22:57,347 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/0bab8670a844488691434327f8801431 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/0bab8670a844488691434327f8801431 2024-11-20T17:22:57,348 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/88c0652c19bc4914abffbca9ff19201d to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/88c0652c19bc4914abffbca9ff19201d 2024-11-20T17:22:57,349 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/5ef2a5c68c0a46b9a5a35944d59244bb to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/5ef2a5c68c0a46b9a5a35944d59244bb 2024-11-20T17:22:57,350 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/c3e61e4d0f8b432496017273591a2880 to 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/c3e61e4d0f8b432496017273591a2880 2024-11-20T17:22:57,350 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/f0c426eb96044fc69746dbf21371e56d to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/f0c426eb96044fc69746dbf21371e56d 2024-11-20T17:22:57,351 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/ca0506920f4741668e7c05952c318629 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/ca0506920f4741668e7c05952c318629 2024-11-20T17:22:57,352 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/0e6d4738092b4bd0bba7bd87647e1496 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/0e6d4738092b4bd0bba7bd87647e1496 2024-11-20T17:22:57,353 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/cdf11aed3af8479eb018d6dc6d96aa68 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/cdf11aed3af8479eb018d6dc6d96aa68 2024-11-20T17:22:57,353 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/ec0caf38515f4192988c8ee6769b016d to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/ec0caf38515f4192988c8ee6769b016d 2024-11-20T17:22:57,354 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/2fe70a2029db455ca0c7b614e58ec45a, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/cd3dd4bab07b4ce69f6470fefc504972, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/110f72ddcf894bde850c1678ac2e602a, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/d9c7942ccf0c46aabcb5adf11c642469, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/2d7d054ec857410db4634535c356fabd, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/f7a2d53336c240cc92f7ac478ca6dba3, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/2da920a518d94968994508ff8119b32c, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/ba91fb3fe52248f88e7cd5751366422a, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/831dd30617fb451cb54d6b61695a640b, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/281f923bb9bb4700adb2e49f5497154e, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/faa6e885179c443e804103f591befcc9, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/c19c7519d98c4a26b804ac7a8b69f7a4, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/460d97f51dbb4dbaa6fa4fe92e5f9643, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/5f0ab29f95e740a9a3b3a0cdbeed5341, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/598badb8e00449808debf4a1a9ddb23c, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/706b982889034527a9a9443262bed801, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/5d6e34e03d3d4a4ab6d37b811a3b9b18, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/8eed907ccc5a4cef96c44fd38e20812d] to archive 2024-11-20T17:22:57,355 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T17:22:57,356 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/2fe70a2029db455ca0c7b614e58ec45a to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/2fe70a2029db455ca0c7b614e58ec45a 2024-11-20T17:22:57,357 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/cd3dd4bab07b4ce69f6470fefc504972 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/cd3dd4bab07b4ce69f6470fefc504972 2024-11-20T17:22:57,358 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/110f72ddcf894bde850c1678ac2e602a to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/110f72ddcf894bde850c1678ac2e602a 2024-11-20T17:22:57,358 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/d9c7942ccf0c46aabcb5adf11c642469 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/d9c7942ccf0c46aabcb5adf11c642469 2024-11-20T17:22:57,359 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/2d7d054ec857410db4634535c356fabd to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/2d7d054ec857410db4634535c356fabd 2024-11-20T17:22:57,360 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/f7a2d53336c240cc92f7ac478ca6dba3 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/f7a2d53336c240cc92f7ac478ca6dba3 2024-11-20T17:22:57,361 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/2da920a518d94968994508ff8119b32c to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/2da920a518d94968994508ff8119b32c 2024-11-20T17:22:57,361 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/ba91fb3fe52248f88e7cd5751366422a to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/ba91fb3fe52248f88e7cd5751366422a 2024-11-20T17:22:57,362 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/831dd30617fb451cb54d6b61695a640b to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/831dd30617fb451cb54d6b61695a640b 2024-11-20T17:22:57,363 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/281f923bb9bb4700adb2e49f5497154e to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/281f923bb9bb4700adb2e49f5497154e 2024-11-20T17:22:57,364 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/faa6e885179c443e804103f591befcc9 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/faa6e885179c443e804103f591befcc9 2024-11-20T17:22:57,365 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/c19c7519d98c4a26b804ac7a8b69f7a4 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/c19c7519d98c4a26b804ac7a8b69f7a4 2024-11-20T17:22:57,365 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/460d97f51dbb4dbaa6fa4fe92e5f9643 to 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/460d97f51dbb4dbaa6fa4fe92e5f9643 2024-11-20T17:22:57,366 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/5f0ab29f95e740a9a3b3a0cdbeed5341 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/5f0ab29f95e740a9a3b3a0cdbeed5341 2024-11-20T17:22:57,367 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/598badb8e00449808debf4a1a9ddb23c to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/598badb8e00449808debf4a1a9ddb23c 2024-11-20T17:22:57,368 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/706b982889034527a9a9443262bed801 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/706b982889034527a9a9443262bed801 2024-11-20T17:22:57,369 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/5d6e34e03d3d4a4ab6d37b811a3b9b18 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/5d6e34e03d3d4a4ab6d37b811a3b9b18 2024-11-20T17:22:57,370 DEBUG [StoreCloser-TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/8eed907ccc5a4cef96c44fd38e20812d to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/8eed907ccc5a4cef96c44fd38e20812d 2024-11-20T17:22:57,373 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/recovered.edits/297.seqid, newMaxSeqId=297, maxSeqId=4 2024-11-20T17:22:57,373 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb. 
2024-11-20T17:22:57,373 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1635): Region close journal for ac48603af62441d2defd4d588b3226cb: 2024-11-20T17:22:57,375 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] handler.UnassignRegionHandler(170): Closed ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:57,375 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=122 updating hbase:meta row=ac48603af62441d2defd4d588b3226cb, regionState=CLOSED 2024-11-20T17:22:57,377 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-11-20T17:22:57,377 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; CloseRegionProcedure ac48603af62441d2defd4d588b3226cb, server=d514dc944523,40121,1732123262111 in 1.8560 sec 2024-11-20T17:22:57,378 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=122, resume processing ppid=121 2024-11-20T17:22:57,378 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, ppid=121, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=ac48603af62441d2defd4d588b3226cb, UNASSIGN in 1.8590 sec 2024-11-20T17:22:57,379 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-11-20T17:22:57,379 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8610 sec 2024-11-20T17:22:57,380 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123377380"}]},"ts":"1732123377380"} 2024-11-20T17:22:57,380 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T17:22:57,382 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T17:22:57,383 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.8740 sec 2024-11-20T17:22:57,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T17:22:57,616 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 120 completed 2024-11-20T17:22:57,617 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T17:22:57,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:22:57,618 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=124, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:22:57,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-20T17:22:57,618 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=124, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:22:57,620 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:57,622 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A, FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B, FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C, FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/recovered.edits] 2024-11-20T17:22:57,624 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/04666f71fa1546878a900c7bd6f2c0c5 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/04666f71fa1546878a900c7bd6f2c0c5 2024-11-20T17:22:57,625 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/2c521f80de314bf6857e96a86cf470aa to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/2c521f80de314bf6857e96a86cf470aa 2024-11-20T17:22:57,626 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/699ce21539c64b9fb33c5e6286e1cae1 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/A/699ce21539c64b9fb33c5e6286e1cae1 2024-11-20T17:22:57,628 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/29eb530d056242b8a8de64609b49a9c4 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/29eb530d056242b8a8de64609b49a9c4 2024-11-20T17:22:57,629 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/2a95be18b07241f6a069828d71e69055 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/2a95be18b07241f6a069828d71e69055 
2024-11-20T17:22:57,630 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/672b078888214b1abc681fc13efc2ad4 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/B/672b078888214b1abc681fc13efc2ad4 2024-11-20T17:22:57,632 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/99ce8b22640944f693d550ad30251bc0 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/99ce8b22640944f693d550ad30251bc0 2024-11-20T17:22:57,632 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/9a892ae8eedf4032a31250acfcb51432 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/9a892ae8eedf4032a31250acfcb51432 2024-11-20T17:22:57,633 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/d78500ccff4b47e9bdeede00af1a2692 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/C/d78500ccff4b47e9bdeede00af1a2692 2024-11-20T17:22:57,635 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/recovered.edits/297.seqid to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb/recovered.edits/297.seqid 2024-11-20T17:22:57,635 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:57,635 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T17:22:57,636 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T17:22:57,636 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-20T17:22:57,638 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120293ac6c4175a4414acd817ea8d0b05ec_ac48603af62441d2defd4d588b3226cb to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120293ac6c4175a4414acd817ea8d0b05ec_ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:57,639 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202eb1c11764ff41a1a2c92b8089489518_ac48603af62441d2defd4d588b3226cb to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202eb1c11764ff41a1a2c92b8089489518_ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:57,640 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411204c9ea397b78b487d8104917d2ff0e50b_ac48603af62441d2defd4d588b3226cb to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411204c9ea397b78b487d8104917d2ff0e50b_ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:57,641 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112053bb73e19cc747cf9eb8fe7ca1389777_ac48603af62441d2defd4d588b3226cb to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112053bb73e19cc747cf9eb8fe7ca1389777_ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:57,642 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205e6b645b975c497e9a22f7872a570ff1_ac48603af62441d2defd4d588b3226cb to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205e6b645b975c497e9a22f7872a570ff1_ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:57,643 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112071693182144443b9a81160ebb9745923_ac48603af62441d2defd4d588b3226cb to 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112071693182144443b9a81160ebb9745923_ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:57,644 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120946cb806e35a401aa5ebd55837376e9b_ac48603af62441d2defd4d588b3226cb to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120946cb806e35a401aa5ebd55837376e9b_ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:57,645 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cf23a626ff26442b91fedacce495dca1_ac48603af62441d2defd4d588b3226cb to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cf23a626ff26442b91fedacce495dca1_ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:57,645 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d3bce4b14a7249fb89a1eef84cfb20e1_ac48603af62441d2defd4d588b3226cb to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d3bce4b14a7249fb89a1eef84cfb20e1_ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:57,646 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d5a812b5669b44c0acc3f00099a60e4c_ac48603af62441d2defd4d588b3226cb to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d5a812b5669b44c0acc3f00099a60e4c_ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:57,647 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d797ebd291644906883fe6f997a42f1b_ac48603af62441d2defd4d588b3226cb to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d797ebd291644906883fe6f997a42f1b_ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:57,648 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120dfc5729ffe4446739434ebfbf12cec85_ac48603af62441d2defd4d588b3226cb to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120dfc5729ffe4446739434ebfbf12cec85_ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:57,649 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e6acb990893e4f80bb912e8d7b23fa03_ac48603af62441d2defd4d588b3226cb to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e6acb990893e4f80bb912e8d7b23fa03_ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:57,650 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120ee4dd3bd024c4747a142ce6a22128e01_ac48603af62441d2defd4d588b3226cb to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120ee4dd3bd024c4747a142ce6a22128e01_ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:57,651 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120fb3b7f8d2da745b19b168f2217329d84_ac48603af62441d2defd4d588b3226cb to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120fb3b7f8d2da745b19b168f2217329d84_ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:57,652 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120fd11a84b8b934dfa9f76e4f77fc2c29c_ac48603af62441d2defd4d588b3226cb to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120fd11a84b8b934dfa9f76e4f77fc2c29c_ac48603af62441d2defd4d588b3226cb 2024-11-20T17:22:57,652 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T17:22:57,654 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=124, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:22:57,655 WARN 
[PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T17:22:57,657 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T17:22:57,657 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=124, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:22:57,657 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T17:22:57,658 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732123377657"}]},"ts":"9223372036854775807"} 2024-11-20T17:22:57,659 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T17:22:57,659 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => ac48603af62441d2defd4d588b3226cb, NAME => 'TestAcidGuarantees,,1732123347555.ac48603af62441d2defd4d588b3226cb.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T17:22:57,659 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-20T17:22:57,659 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732123377659"}]},"ts":"9223372036854775807"} 2024-11-20T17:22:57,660 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T17:22:57,662 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=124, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:22:57,663 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 45 msec 2024-11-20T17:22:57,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-20T17:22:57,719 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 124 completed 2024-11-20T17:22:57,728 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobScanAtomicity Thread=239 (was 240), OpenFileDescriptor=450 (was 452), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=288 (was 326), ProcessCount=11 (was 11), AvailableMemoryMB=6139 (was 6173) 2024-11-20T17:22:57,736 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testScanAtomicity Thread=239, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=288, ProcessCount=11, AvailableMemoryMB=6139 2024-11-20T17:22:57,737 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
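Before the next test re-creates the table (the MEMSTORE_FLUSHSIZE warning just above), the pid=124 entries show the full teardown path for the previous table: HFileArchiver moves every store file and MOB file under archive/data/default/TestAcidGuarantees, deletes the region and mobdir directories, and DeleteTableProcedure then removes the region row, table state and descriptor from hbase:meta. A minimal, hedged sketch of the client call that drives this procedure (standard HBase 2.x Admin API; the configuration handling and method wrapper are assumptions of the sketch, not taken from the test code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Disable and delete a table; the master runs the delete procedure,
    // which archives the HFiles and cleans hbase:meta as logged above.
    static void dropTable(String name) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      try (Connection conn = ConnectionFactory.createConnection(conf);
           Admin admin = conn.getAdmin()) {
        TableName tn = TableName.valueOf(name);
        if (admin.tableExists(tn)) {
          if (admin.isTableEnabled(tn)) {
            admin.disableTable(tn);
          }
          admin.deleteTable(tn);
        }
      }
    }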
2024-11-20T17:22:57,737 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T17:22:57,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=125, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T17:22:57,739 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T17:22:57,739 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:22:57,739 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 125 2024-11-20T17:22:57,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=125 2024-11-20T17:22:57,740 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T17:22:57,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742244_1420 (size=960) 2024-11-20T17:22:57,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=125 2024-11-20T17:22:58,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=125 2024-11-20T17:22:58,146 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0 2024-11-20T17:22:58,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742245_1421 (size=53) 2024-11-20T17:22:58,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=125 2024-11-20T17:22:58,551 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:22:58,551 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 0e2c16046f40377cebf7837c5395d623, disabling compactions & flushes 2024-11-20T17:22:58,551 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:22:58,551 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:22:58,552 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. after waiting 0 ms 2024-11-20T17:22:58,552 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:22:58,552 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 
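The CreateTableProcedure above (pid=125) lays out a table with three column families A, B and C, one version each, 64 KB blocks, and the BASIC in-memory compaction policy carried as table metadata. A hedged sketch of how such a descriptor is typically built with the 2.x client API (the builder calls are standard; treating this as equivalent to the exact descriptor in the log, which also carries defaults such as the store file tracker, is an assumption):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    // Build TestAcidGuarantees with families A/B/C as described in the log entry.
    static void createTestTable(Admin admin) throws Exception {
      TableDescriptorBuilder tdb = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestAcidGuarantees"))
          // Table-level metadata key taken verbatim from the log above.
          .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
      for (String family : new String[] { "A", "B", "C" }) {
        tdb.setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes(family))
            .setMaxVersions(1)         // VERSIONS => '1'
            .setBlocksize(64 * 1024)   // BLOCKSIZE => '65536 B (64KB)'
            .build());
      }
      admin.createTable(tdb.build());  // stored as pid=125 in the log
    }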
2024-11-20T17:22:58,552 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:22:58,552 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T17:22:58,553 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732123378552"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732123378552"}]},"ts":"1732123378552"} 2024-11-20T17:22:58,554 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T17:22:58,554 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T17:22:58,554 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123378554"}]},"ts":"1732123378554"} 2024-11-20T17:22:58,555 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T17:22:58,559 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=0e2c16046f40377cebf7837c5395d623, ASSIGN}] 2024-11-20T17:22:58,560 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=0e2c16046f40377cebf7837c5395d623, ASSIGN 2024-11-20T17:22:58,560 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=0e2c16046f40377cebf7837c5395d623, ASSIGN; state=OFFLINE, location=d514dc944523,40121,1732123262111; forceNewPlan=false, retain=false 2024-11-20T17:22:58,711 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=126 updating hbase:meta row=0e2c16046f40377cebf7837c5395d623, regionState=OPENING, regionLocation=d514dc944523,40121,1732123262111 2024-11-20T17:22:58,712 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; OpenRegionProcedure 0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111}] 2024-11-20T17:22:58,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=125 2024-11-20T17:22:58,863 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:22:58,866 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 
2024-11-20T17:22:58,866 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(7285): Opening region: {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} 2024-11-20T17:22:58,866 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 0e2c16046f40377cebf7837c5395d623 2024-11-20T17:22:58,866 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:22:58,866 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(7327): checking encryption for 0e2c16046f40377cebf7837c5395d623 2024-11-20T17:22:58,866 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(7330): checking classloading for 0e2c16046f40377cebf7837c5395d623 2024-11-20T17:22:58,867 INFO [StoreOpener-0e2c16046f40377cebf7837c5395d623-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 0e2c16046f40377cebf7837c5395d623 2024-11-20T17:22:58,868 INFO [StoreOpener-0e2c16046f40377cebf7837c5395d623-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:22:58,868 INFO [StoreOpener-0e2c16046f40377cebf7837c5395d623-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0e2c16046f40377cebf7837c5395d623 columnFamilyName A 2024-11-20T17:22:58,869 DEBUG [StoreOpener-0e2c16046f40377cebf7837c5395d623-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:22:58,869 INFO [StoreOpener-0e2c16046f40377cebf7837c5395d623-1 {}] regionserver.HStore(327): Store=0e2c16046f40377cebf7837c5395d623/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:22:58,869 INFO [StoreOpener-0e2c16046f40377cebf7837c5395d623-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 0e2c16046f40377cebf7837c5395d623 2024-11-20T17:22:58,870 INFO [StoreOpener-0e2c16046f40377cebf7837c5395d623-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:22:58,870 INFO [StoreOpener-0e2c16046f40377cebf7837c5395d623-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0e2c16046f40377cebf7837c5395d623 columnFamilyName B 2024-11-20T17:22:58,870 DEBUG [StoreOpener-0e2c16046f40377cebf7837c5395d623-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:22:58,870 INFO [StoreOpener-0e2c16046f40377cebf7837c5395d623-1 {}] regionserver.HStore(327): Store=0e2c16046f40377cebf7837c5395d623/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:22:58,871 INFO [StoreOpener-0e2c16046f40377cebf7837c5395d623-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 0e2c16046f40377cebf7837c5395d623 2024-11-20T17:22:58,871 INFO [StoreOpener-0e2c16046f40377cebf7837c5395d623-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:22:58,871 INFO [StoreOpener-0e2c16046f40377cebf7837c5395d623-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0e2c16046f40377cebf7837c5395d623 columnFamilyName C 2024-11-20T17:22:58,872 DEBUG [StoreOpener-0e2c16046f40377cebf7837c5395d623-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:22:58,872 INFO [StoreOpener-0e2c16046f40377cebf7837c5395d623-1 {}] regionserver.HStore(327): Store=0e2c16046f40377cebf7837c5395d623/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:22:58,872 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:22:58,873 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623 2024-11-20T17:22:58,873 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623 2024-11-20T17:22:58,874 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T17:22:58,875 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(1085): writing seq id for 0e2c16046f40377cebf7837c5395d623 2024-11-20T17:22:58,876 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T17:22:58,876 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(1102): Opened 0e2c16046f40377cebf7837c5395d623; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68226533, jitterRate=0.016654565930366516}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T17:22:58,877 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(1001): Region open journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:22:58,877 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623., pid=127, masterSystemTime=1732123378863 2024-11-20T17:22:58,879 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:22:58,879 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 
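On open, each store gets a CompactingMemStore (2 MB in-memory flush threshold, BASIC compactor), and because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the descriptor, the flush policy falls back to the region memstore flush heap size divided by the number of families (16.0 M here, as logged). A hedged sketch of setting that bound explicitly on an existing table (the property name is taken from the log; the 8 MB value is only for illustration):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    // Copy the current descriptor and override the per-family flush lower bound,
    // so the flush policy no longer needs the fallback noted in the log.
    static void setPerFamilyFlushLowerBound(Admin admin) throws Exception {
      TableName tn = TableName.valueOf("TestAcidGuarantees");
      TableDescriptor current = admin.getDescriptor(tn);
      TableDescriptor updated = TableDescriptorBuilder.newBuilder(current)
          .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                    String.valueOf(8L * 1024 * 1024))
          .build();
      admin.modifyTable(updated);
    }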
2024-11-20T17:22:58,879 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=126 updating hbase:meta row=0e2c16046f40377cebf7837c5395d623, regionState=OPEN, openSeqNum=2, regionLocation=d514dc944523,40121,1732123262111 2024-11-20T17:22:58,881 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-11-20T17:22:58,881 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; OpenRegionProcedure 0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 in 168 msec 2024-11-20T17:22:58,882 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=126, resume processing ppid=125 2024-11-20T17:22:58,882 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, ppid=125, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=0e2c16046f40377cebf7837c5395d623, ASSIGN in 322 msec 2024-11-20T17:22:58,882 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T17:22:58,882 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123378882"}]},"ts":"1732123378882"} 2024-11-20T17:22:58,883 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T17:22:58,885 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T17:22:58,886 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1470 sec 2024-11-20T17:22:59,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=125 2024-11-20T17:22:59,843 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 125 completed 2024-11-20T17:22:59,844 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4c60eb7d to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@695c2253 2024-11-20T17:22:59,848 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63cefe40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:22:59,849 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:22:59,850 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45548, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:22:59,851 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T17:22:59,852 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41688, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T17:22:59,853 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x79b10416 to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7177efc9 2024-11-20T17:22:59,856 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65df2359, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:22:59,857 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2f142b04 to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@61d38088 2024-11-20T17:22:59,860 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d0ab200, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:22:59,861 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0de9f076 to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7043f683 2024-11-20T17:22:59,863 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5871c039, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:22:59,864 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4414259d to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2b0c2472 2024-11-20T17:22:59,866 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7daa5922, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:22:59,867 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7ed69825 to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@34b30c39 2024-11-20T17:22:59,869 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b7f20c4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:22:59,870 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3d672ed2 to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5f7c40ba 2024-11-20T17:22:59,872 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2070263a, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:22:59,873 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7cf40102 to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@41b0e7b6 2024-11-20T17:22:59,877 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6050584c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:22:59,877 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x496fe03f to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@f2423f3 2024-11-20T17:22:59,880 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6dd48863, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:22:59,881 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3652e74d to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@184771cf 2024-11-20T17:22:59,883 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51196534, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:22:59,884 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2405c04e to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@76f0408 2024-11-20T17:22:59,888 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1dc5e114, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:22:59,891 DEBUG [hconnection-0x35c5addb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:22:59,891 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:22:59,891 DEBUG [hconnection-0x4f11267e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:22:59,891 DEBUG [hconnection-0x2f44fa98-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:22:59,892 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45552, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:22:59,892 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 
172.17.0.2:45556, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:22:59,892 DEBUG [hconnection-0x692f6ff2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:22:59,892 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45570, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:22:59,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees 2024-11-20T17:22:59,893 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45576, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:22:59,894 DEBUG [hconnection-0x6d7675eb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:22:59,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-20T17:22:59,894 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:22:59,894 DEBUG [hconnection-0x1ed2dc26-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:22:59,895 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:22:59,895 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:22:59,895 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45582, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:22:59,895 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45592, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:22:59,895 DEBUG [hconnection-0x6639c481-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:22:59,896 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45608, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:22:59,899 DEBUG [hconnection-0x403e55b9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:22:59,900 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45616, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:22:59,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 
0e2c16046f40377cebf7837c5395d623 2024-11-20T17:22:59,900 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0e2c16046f40377cebf7837c5395d623 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T17:22:59,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=A 2024-11-20T17:22:59,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:59,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=B 2024-11-20T17:22:59,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:59,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=C 2024-11-20T17:22:59,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:22:59,910 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:59,910 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:59,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123439909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:59,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123439910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:59,911 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:59,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45576 deadline: 1732123439910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:59,911 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:59,911 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:22:59,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123439911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:59,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123439911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:22:59,915 DEBUG [hconnection-0x30b8cd03-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:22:59,915 DEBUG [hconnection-0x3a3bd821-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:22:59,916 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45630, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:22:59,916 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45628, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:22:59,932 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/045cba6be8aa4bbb9a30b16bdea3bd8b is 50, key is test_row_0/A:col10/1732123379898/Put/seqid=0 2024-11-20T17:22:59,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742246_1422 (size=12001) 2024-11-20T17:22:59,950 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/045cba6be8aa4bbb9a30b16bdea3bd8b 2024-11-20T17:22:59,982 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/b2fb209b99654aecbcf1331a8f18d09f is 50, key is test_row_0/B:col10/1732123379898/Put/seqid=0 2024-11-20T17:22:59,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 
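At this point a table flush has been requested (pid=128) while the parallel writers keep pushing puts, so the region crosses its memstore blocking limit (512.0 K, as logged) and the handlers answer with RegionTooBusyException, which the standard client treats as retryable. A hedged sketch of the administrative call that roughly corresponds to the "flush TestAcidGuarantees" request logged above (standard Admin API; the connection handling around it is an assumption):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Ask the master to flush every region of the table; this is the kind of
    // request that appears above as a stored flush procedure (pid=128) and the
    // subsequent MemStoreFlusher work on the region server.
    static void flushTable(String name) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      try (Connection conn = ConnectionFactory.createConnection(conf);
           Admin admin = conn.getAdmin()) {
        admin.flush(TableName.valueOf(name));
      }
    }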
2024-11-20T17:22:59,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742247_1423 (size=12001) 2024-11-20T17:23:00,016 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:00,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123440012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:00,016 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:00,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123440012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:00,016 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:00,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123440012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:00,017 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:00,017 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:00,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123440012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:00,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45576 deadline: 1732123440012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:00,047 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:00,048 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-20T17:23:00,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:00,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:00,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:00,048 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:00,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:00,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:00,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-20T17:23:00,200 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:00,200 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-20T17:23:00,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:00,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:00,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:00,201 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
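The pid=129 failures above are a benign race rather than data loss: the master has dispatched a FlushRegionCallable for TestAcidGuarantees while the region is already flushing, so the region server answers "NOT flushing ... as already flushing", the callable fails with "Unable to complete flush", and the master logs the RemoteProcedureException and re-dispatches the procedure until the in-progress flush completes. A flush like this can be requested through the public Admin.flush() API; whether this particular procedure came from the test's own flusher or another path is not shown here, but a minimal sketch of the request, assuming the standard HBase 2.x Admin API and the table name taken from the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TriggerFlush {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks for a flush of every region of the table. If a region is already flushing
      // (as in the log above), the server-side callable is rejected and the master
      // simply retries it until the flush can actually run.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}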
2024-11-20T17:23:00,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:00,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:00,219 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:00,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123440217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:00,220 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:00,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123440217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:00,220 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:00,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123440217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:00,220 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:00,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123440218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:00,221 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:00,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45576 deadline: 1732123440218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:00,353 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:00,353 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-20T17:23:00,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:00,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:00,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:00,354 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:23:00,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:00,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:00,396 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/b2fb209b99654aecbcf1331a8f18d09f 2024-11-20T17:23:00,423 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/5c4a6daec5b848208b3a2c9f3acf0c8c is 50, key is test_row_0/C:col10/1732123379898/Put/seqid=0 2024-11-20T17:23:00,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742248_1424 (size=12001) 2024-11-20T17:23:00,427 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/5c4a6daec5b848208b3a2c9f3acf0c8c 2024-11-20T17:23:00,431 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/045cba6be8aa4bbb9a30b16bdea3bd8b as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/045cba6be8aa4bbb9a30b16bdea3bd8b 2024-11-20T17:23:00,434 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/045cba6be8aa4bbb9a30b16bdea3bd8b, entries=150, sequenceid=14, filesize=11.7 K 2024-11-20T17:23:00,434 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/b2fb209b99654aecbcf1331a8f18d09f as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/b2fb209b99654aecbcf1331a8f18d09f 2024-11-20T17:23:00,437 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/b2fb209b99654aecbcf1331a8f18d09f, entries=150, sequenceid=14, filesize=11.7 K 2024-11-20T17:23:00,439 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/5c4a6daec5b848208b3a2c9f3acf0c8c as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/5c4a6daec5b848208b3a2c9f3acf0c8c 
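The "Committing ... as ..." / "Added ..." pairs above show the two-step store-file commit that makes a flush safe: each column family's data is first written to an HFile under the region's .tmp directory, and only once the file is complete is it moved into the family directory (A/, B/, C/) and registered with the store. A simplified illustration of that commit step, assuming plain Hadoop FileSystem semantics and reusing paths from the log; the real code lives in HRegionFileSystem and does more bookkeeping than a bare rename:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CommitStoreFile {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path region = new Path("hdfs://localhost:41637/user/jenkins/test-data/"
        + "6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/"
        + "0e2c16046f40377cebf7837c5395d623");
    Path tmpFile = new Path(region, ".tmp/A/045cba6be8aa4bbb9a30b16bdea3bd8b");
    Path committed = new Path(region, "A/045cba6be8aa4bbb9a30b16bdea3bd8b");
    FileSystem fs = tmpFile.getFileSystem(conf);
    // The rename is atomic on HDFS, so readers see either the old store-file set or the
    // new one; a half-written HFile never becomes visible under the family directory.
    if (!fs.rename(tmpFile, committed)) {
      throw new java.io.IOException("rename failed: " + tmpFile + " -> " + committed);
    }
  }
}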
2024-11-20T17:23:00,442 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/5c4a6daec5b848208b3a2c9f3acf0c8c, entries=150, sequenceid=14, filesize=11.7 K 2024-11-20T17:23:00,442 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=147.60 KB/151140 for 0e2c16046f40377cebf7837c5395d623 in 542ms, sequenceid=14, compaction requested=false 2024-11-20T17:23:00,442 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-20T17:23:00,443 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:23:00,490 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T17:23:00,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-20T17:23:00,505 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:00,506 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-20T17:23:00,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 
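The "Finished flush" line above accounts for the three per-family flushes that preceded it: 3 x 22.36 KB (stores A, B and C) is the ~67.09 KB/68700 bytes written, in 542 ms at sequenceid=14. Note that currentSize is still 147.60 KB, so writes that arrived during the flush have already pushed the region back toward the 512 K blocking limit, which is why pid=129 immediately starts a second flush and more Mutate calls keep being rejected below. The tiny limit itself comes from the test configuration: the blocking threshold is the per-region flush size times the block multiplier, so one hypothetical combination that yields 512 K (the values actually used by TestAcidGuarantees may differ) is:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class TinyMemstoreConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // flush each region at 128 K
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // block writes at 4 x flush size
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
    System.out.println("blocking limit = " + blockingLimit + " bytes"); // 524288 bytes = 512.0 K
  }
}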
2024-11-20T17:23:00,506 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2837): Flushing 0e2c16046f40377cebf7837c5395d623 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T17:23:00,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=A 2024-11-20T17:23:00,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:00,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=B 2024-11-20T17:23:00,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:00,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=C 2024-11-20T17:23:00,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:00,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/3bb9851efeb14c10be47700884927e79 is 50, key is test_row_0/A:col10/1732123379909/Put/seqid=0 2024-11-20T17:23:00,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742249_1425 (size=12001) 2024-11-20T17:23:00,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 0e2c16046f40377cebf7837c5395d623 2024-11-20T17:23:00,523 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:00,532 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:00,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123440525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:00,532 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:00,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123440528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:00,533 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:00,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45576 deadline: 1732123440528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:00,533 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:00,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123440529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:00,535 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:00,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123440532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:00,635 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:00,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123440633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:00,635 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:00,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123440634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:00,636 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:00,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45576 deadline: 1732123440634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:00,636 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:00,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123440634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:00,639 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:00,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123440636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:00,839 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:00,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123440836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:00,840 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:00,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45576 deadline: 1732123440837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:00,840 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:00,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123440837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:00,840 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:00,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123440838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:00,843 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:00,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123440840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:00,915 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/3bb9851efeb14c10be47700884927e79 2024-11-20T17:23:00,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/8e0de97c490246808c27341c4fdc2039 is 50, key is test_row_0/B:col10/1732123379909/Put/seqid=0 2024-11-20T17:23:00,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742250_1426 (size=12001) 2024-11-20T17:23:00,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-20T17:23:01,141 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:01,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123441140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:01,143 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:01,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45576 deadline: 1732123441141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:01,146 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:01,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123441143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:01,146 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:01,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123441143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:01,146 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:01,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123441144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:01,328 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/8e0de97c490246808c27341c4fdc2039 2024-11-20T17:23:01,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/62b66cb9f6df4282af5118bb7c9f8048 is 50, key is test_row_0/C:col10/1732123379909/Put/seqid=0 2024-11-20T17:23:01,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742251_1427 (size=12001) 2024-11-20T17:23:01,643 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:01,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123441642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:01,649 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:01,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123441646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:01,650 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:01,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45576 deadline: 1732123441648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:01,650 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:01,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123441650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:01,653 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:01,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123441651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:01,740 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/62b66cb9f6df4282af5118bb7c9f8048 2024-11-20T17:23:01,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/3bb9851efeb14c10be47700884927e79 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/3bb9851efeb14c10be47700884927e79 2024-11-20T17:23:01,749 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/3bb9851efeb14c10be47700884927e79, entries=150, sequenceid=39, filesize=11.7 K 2024-11-20T17:23:01,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/8e0de97c490246808c27341c4fdc2039 as 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/8e0de97c490246808c27341c4fdc2039 2024-11-20T17:23:01,753 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/8e0de97c490246808c27341c4fdc2039, entries=150, sequenceid=39, filesize=11.7 K 2024-11-20T17:23:01,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/62b66cb9f6df4282af5118bb7c9f8048 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/62b66cb9f6df4282af5118bb7c9f8048 2024-11-20T17:23:01,757 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/62b66cb9f6df4282af5118bb7c9f8048, entries=150, sequenceid=39, filesize=11.7 K 2024-11-20T17:23:01,758 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 0e2c16046f40377cebf7837c5395d623 in 1252ms, sequenceid=39, compaction requested=false 2024-11-20T17:23:01,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2538): Flush status journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:23:01,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 
2024-11-20T17:23:01,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=129 2024-11-20T17:23:01,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=129 2024-11-20T17:23:01,761 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-11-20T17:23:01,761 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8640 sec 2024-11-20T17:23:01,762 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees in 1.8700 sec 2024-11-20T17:23:01,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-20T17:23:01,998 INFO [Thread-1898 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 128 completed 2024-11-20T17:23:01,999 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:23:02,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees 2024-11-20T17:23:02,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-20T17:23:02,001 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:23:02,001 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:23:02,001 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:23:02,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-20T17:23:02,153 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:02,153 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-20T17:23:02,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 
2024-11-20T17:23:02,154 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2837): Flushing 0e2c16046f40377cebf7837c5395d623 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T17:23:02,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=A 2024-11-20T17:23:02,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:02,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=B 2024-11-20T17:23:02,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:02,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=C 2024-11-20T17:23:02,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:02,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/7492edb981454cdfb8ffca0d90f35e00 is 50, key is test_row_0/A:col10/1732123380528/Put/seqid=0 2024-11-20T17:23:02,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742252_1428 (size=9657) 2024-11-20T17:23:02,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-20T17:23:02,575 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/7492edb981454cdfb8ffca0d90f35e00 2024-11-20T17:23:02,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/e0cf4df5ece14445888d77638f398831 is 50, key is test_row_0/B:col10/1732123380528/Put/seqid=0 2024-11-20T17:23:02,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742253_1429 (size=9657) 2024-11-20T17:23:02,598 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=50 
(bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/e0cf4df5ece14445888d77638f398831 2024-11-20T17:23:02,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-20T17:23:02,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/342b25c854994e18a62e90ad14bc3205 is 50, key is test_row_0/C:col10/1732123380528/Put/seqid=0 2024-11-20T17:23:02,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742254_1430 (size=9657) 2024-11-20T17:23:02,651 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:02,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 0e2c16046f40377cebf7837c5395d623 2024-11-20T17:23:02,729 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:02,729 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:02,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45576 deadline: 1732123442720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:02,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123442721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:02,729 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:02,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123442724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:02,737 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:02,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123442729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:02,737 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:02,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123442729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:02,832 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:02,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45576 deadline: 1732123442830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:02,833 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:02,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123442830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:02,833 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:02,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123442830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:02,840 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:02,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123442838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:02,840 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:02,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123442838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:02,899 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T17:23:03,009 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/342b25c854994e18a62e90ad14bc3205 2024-11-20T17:23:03,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/7492edb981454cdfb8ffca0d90f35e00 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/7492edb981454cdfb8ffca0d90f35e00 2024-11-20T17:23:03,017 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/7492edb981454cdfb8ffca0d90f35e00, entries=100, sequenceid=50, filesize=9.4 K 2024-11-20T17:23:03,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/e0cf4df5ece14445888d77638f398831 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/e0cf4df5ece14445888d77638f398831 2024-11-20T17:23:03,022 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/e0cf4df5ece14445888d77638f398831, entries=100, sequenceid=50, filesize=9.4 K 2024-11-20T17:23:03,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/342b25c854994e18a62e90ad14bc3205 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/342b25c854994e18a62e90ad14bc3205 2024-11-20T17:23:03,025 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/342b25c854994e18a62e90ad14bc3205, entries=100, sequenceid=50, filesize=9.4 K 2024-11-20T17:23:03,026 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 0e2c16046f40377cebf7837c5395d623 in 872ms, sequenceid=50, compaction requested=true 2024-11-20T17:23:03,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2538): Flush status journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:23:03,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 
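The flush finishing here (pid=131, the per-region FlushRegionProcedure spawned by FlushTableProcedure pid=130) is driven from the admin API: a few entries further on, HBaseAdmin$TableFuture reports "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 130 completed" and the jenkins client immediately requests the next flush, which becomes pid=132. A minimal client-side sketch of issuing such a table flush, assuming the test cluster's configuration is available on the classpath, could look like this:

```java
// Sketch only: drives the same flush path seen in this log.
// Assumes hbase-site.xml for the cluster is on the classpath and the table
// name matches the one in the log (default:TestAcidGuarantees).
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Submits a flush for every region of the table and waits for the
      // resulting master procedure (the pid=130/132-style entries) to finish.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```

Each call turns into one FlushTableProcedure on the master plus one FlushRegionProcedure per region, which is the pid/ppid pairing the ProcedureExecutor entries below record.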
2024-11-20T17:23:03,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=131
2024-11-20T17:23:03,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=131
2024-11-20T17:23:03,028 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130
2024-11-20T17:23:03,029 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0260 sec
2024-11-20T17:23:03,030 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees in 1.0300 sec
2024-11-20T17:23:03,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 0e2c16046f40377cebf7837c5395d623
2024-11-20T17:23:03,036 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0e2c16046f40377cebf7837c5395d623 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB
2024-11-20T17:23:03,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=A
2024-11-20T17:23:03,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T17:23:03,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=B
2024-11-20T17:23:03,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T17:23:03,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=C
2024-11-20T17:23:03,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T17:23:03,041 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/c788e5b43fa746afa08dda588898ba41 is 50, key is test_row_0/A:col10/1732123382728/Put/seqid=0
2024-11-20T17:23:03,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742255_1431 (size=14341)
2024-11-20T17:23:03,047 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:03,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123443042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:03,050 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:03,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123443043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:03,051 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:03,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123443045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:03,052 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:03,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123443046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:03,052 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:03,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45576 deadline: 1732123443047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:03,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-20T17:23:03,104 INFO [Thread-1898 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-11-20T17:23:03,105 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:23:03,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees 2024-11-20T17:23:03,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T17:23:03,107 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:23:03,107 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:23:03,107 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:23:03,152 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:03,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123443152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:03,153 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:03,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123443152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:03,156 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:03,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123443153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:03,157 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:03,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45576 deadline: 1732123443153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:03,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T17:23:03,259 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:03,259 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T17:23:03,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:03,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:03,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:03,259 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:03,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:03,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:03,352 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:03,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123443350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:03,357 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:03,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123443355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:03,358 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:03,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123443355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:03,361 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:03,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123443358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:03,363 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:03,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45576 deadline: 1732123443358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:03,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T17:23:03,411 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:03,412 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T17:23:03,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:03,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:03,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:03,412 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
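Two failure modes are interleaved in this stretch: the master keeps dispatching FlushRegionProcedure pid=133, which the region server rejects with "NOT flushing ... as already flushing" because MemStoreFlusher.0 is still writing the previous snapshot out, and client Mutate calls keep bouncing off the memstore blocking limit reported as "Over memstore limit=512.0 K". HRegion.checkResources() derives that blocking limit from the region's memstore flush size multiplied by hbase.hregion.memstore.block.multiplier, so a 512 K limit implies a test-scaled flush size far below the production default. The concrete values this test uses are not visible in this excerpt; the numbers in the sketch below are assumptions chosen only to reproduce a 512 K limit and to show which settings control it:

```java
// Illustrative only: the two settings behind the "Over memstore limit" rejections.
// The values below are assumptions (128 K flush size * multiplier 4 = 512 K);
// they are not taken from this test's actual configuration.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);  // per-region flush threshold
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // blocking limit = flush size * multiplier

    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 134217728L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Writes block above " + blockingLimit + " bytes per region"); // 524288 = 512.0 K
  }
}
```

Raising either value gives writers more headroom before they block, at the cost of larger memstores and longer flushes; the test presumably keeps them tiny so this blocked-write path gets exercised.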
2024-11-20T17:23:03,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:03,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:03,446 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/c788e5b43fa746afa08dda588898ba41 2024-11-20T17:23:03,454 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/bcd025597e534dab84eea8f0f795188e is 50, key is test_row_0/B:col10/1732123382728/Put/seqid=0 2024-11-20T17:23:03,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742256_1432 (size=12001) 2024-11-20T17:23:03,564 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:03,565 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T17:23:03,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:03,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:03,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:03,565 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
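From the client's side, all of this surfaces as RegionTooBusyException on Mutate, which is what the CallRunner entries above are recording. The stock HBase client treats that exception as retryable and backs off internally, so callers normally only see it once retries are exhausted. The sketch below, under the assumption that internal retries are dialed down enough for the exception to reach the caller directly, makes the back-off explicit; the row, family, and qualifier mirror the keys visible in the HFile writer entries (test_row_0, A:col10) but are otherwise illustrative:

```java
// Hedged sketch: explicit retry on RegionTooBusyException. The stock client
// already retries this internally; with default settings the error usually
// arrives wrapped in a retries-exhausted exception rather than directly.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoff {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);               // rejected while the memstore is over its blocking limit
          break;
        } catch (RegionTooBusyException e) {
          if (attempt >= 10) throw e;   // give up after a bounded number of attempts
          Thread.sleep(100L * attempt); // simple linear back-off before retrying
        }
      }
    }
  }
}
```

The built-in equivalent is governed by hbase.client.retries.number and hbase.client.pause, which is why the test's writers eventually get through on their own once the flush completes and the memstore drops back under the limit.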
2024-11-20T17:23:03,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:03,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:03,666 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:03,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123443660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:03,667 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:03,667 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:03,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123443662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:03,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123443660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:03,667 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:03,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45576 deadline: 1732123443664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:03,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T17:23:03,716 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:03,716 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T17:23:03,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:03,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:03,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:03,717 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:03,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:03,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:03,859 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/bcd025597e534dab84eea8f0f795188e 2024-11-20T17:23:03,860 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:03,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123443856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:03,865 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/9267d789e57348c993000381b38be58e is 50, key is test_row_0/C:col10/1732123382728/Put/seqid=0 2024-11-20T17:23:03,868 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:03,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742257_1433 (size=12001) 2024-11-20T17:23:03,869 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T17:23:03,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:03,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:03,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:03,869 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:03,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:03,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:04,020 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:04,020 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T17:23:04,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:04,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:04,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:04,021 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:04,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:04,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:04,170 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:04,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123444167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:04,170 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:04,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123444169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:04,170 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:04,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45576 deadline: 1732123444169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:04,173 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:04,173 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T17:23:04,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 
2024-11-20T17:23:04,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:04,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:04,174 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:04,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:23:04,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:04,176 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:04,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123444172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:04,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T17:23:04,269 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/9267d789e57348c993000381b38be58e 2024-11-20T17:23:04,273 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/c788e5b43fa746afa08dda588898ba41 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/c788e5b43fa746afa08dda588898ba41 2024-11-20T17:23:04,277 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/c788e5b43fa746afa08dda588898ba41, entries=200, sequenceid=76, filesize=14.0 K 2024-11-20T17:23:04,277 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/bcd025597e534dab84eea8f0f795188e as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/bcd025597e534dab84eea8f0f795188e 2024-11-20T17:23:04,281 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/bcd025597e534dab84eea8f0f795188e, entries=150, sequenceid=76, filesize=11.7 K 2024-11-20T17:23:04,281 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/9267d789e57348c993000381b38be58e as 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/9267d789e57348c993000381b38be58e 2024-11-20T17:23:04,284 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/9267d789e57348c993000381b38be58e, entries=150, sequenceid=76, filesize=11.7 K 2024-11-20T17:23:04,285 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 0e2c16046f40377cebf7837c5395d623 in 1249ms, sequenceid=76, compaction requested=true 2024-11-20T17:23:04,285 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:23:04,285 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0e2c16046f40377cebf7837c5395d623:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:23:04,285 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:04,285 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0e2c16046f40377cebf7837c5395d623:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:23:04,285 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:04,285 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0e2c16046f40377cebf7837c5395d623:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:23:04,285 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:23:04,285 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:23:04,285 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:23:04,286 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 45660 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:23:04,286 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48000 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:23:04,286 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 0e2c16046f40377cebf7837c5395d623/B is initiating minor compaction (all files) 2024-11-20T17:23:04,286 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): 0e2c16046f40377cebf7837c5395d623/A is initiating minor compaction (all files) 2024-11-20T17:23:04,286 INFO 
[RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0e2c16046f40377cebf7837c5395d623/B in TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:04,286 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0e2c16046f40377cebf7837c5395d623/A in TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:04,286 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/b2fb209b99654aecbcf1331a8f18d09f, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/8e0de97c490246808c27341c4fdc2039, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/e0cf4df5ece14445888d77638f398831, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/bcd025597e534dab84eea8f0f795188e] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp, totalSize=44.6 K 2024-11-20T17:23:04,287 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/045cba6be8aa4bbb9a30b16bdea3bd8b, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/3bb9851efeb14c10be47700884927e79, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/7492edb981454cdfb8ffca0d90f35e00, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/c788e5b43fa746afa08dda588898ba41] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp, totalSize=46.9 K 2024-11-20T17:23:04,287 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting b2fb209b99654aecbcf1331a8f18d09f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1732123379898 2024-11-20T17:23:04,287 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 045cba6be8aa4bbb9a30b16bdea3bd8b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1732123379898 2024-11-20T17:23:04,288 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3bb9851efeb14c10be47700884927e79, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732123379909 2024-11-20T17:23:04,288 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 8e0de97c490246808c27341c4fdc2039, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, 
seqNum=39, earliestPutTs=1732123379909 2024-11-20T17:23:04,289 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7492edb981454cdfb8ffca0d90f35e00, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732123380528 2024-11-20T17:23:04,289 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting e0cf4df5ece14445888d77638f398831, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732123380528 2024-11-20T17:23:04,289 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting bcd025597e534dab84eea8f0f795188e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732123382719 2024-11-20T17:23:04,289 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting c788e5b43fa746afa08dda588898ba41, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732123382719 2024-11-20T17:23:04,298 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0e2c16046f40377cebf7837c5395d623#B#compaction#364 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:23:04,298 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0e2c16046f40377cebf7837c5395d623#A#compaction#365 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:23:04,299 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/eb04859b6f4842548a02c96f16181b19 is 50, key is test_row_0/B:col10/1732123382728/Put/seqid=0 2024-11-20T17:23:04,299 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/166013e67f734ca8a5c9b625df08439a is 50, key is test_row_0/A:col10/1732123382728/Put/seqid=0 2024-11-20T17:23:04,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742259_1435 (size=12139) 2024-11-20T17:23:04,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742258_1434 (size=12139) 2024-11-20T17:23:04,310 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/eb04859b6f4842548a02c96f16181b19 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/eb04859b6f4842548a02c96f16181b19 2024-11-20T17:23:04,316 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0e2c16046f40377cebf7837c5395d623/B of 
0e2c16046f40377cebf7837c5395d623 into eb04859b6f4842548a02c96f16181b19(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:23:04,316 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:23:04,316 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623., storeName=0e2c16046f40377cebf7837c5395d623/B, priority=12, startTime=1732123384285; duration=0sec 2024-11-20T17:23:04,316 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:23:04,316 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0e2c16046f40377cebf7837c5395d623:B 2024-11-20T17:23:04,316 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:23:04,317 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 45660 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:23:04,317 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 0e2c16046f40377cebf7837c5395d623/C is initiating minor compaction (all files) 2024-11-20T17:23:04,317 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0e2c16046f40377cebf7837c5395d623/C in TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 
2024-11-20T17:23:04,317 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/5c4a6daec5b848208b3a2c9f3acf0c8c, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/62b66cb9f6df4282af5118bb7c9f8048, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/342b25c854994e18a62e90ad14bc3205, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/9267d789e57348c993000381b38be58e] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp, totalSize=44.6 K 2024-11-20T17:23:04,318 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 5c4a6daec5b848208b3a2c9f3acf0c8c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1732123379898 2024-11-20T17:23:04,318 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 62b66cb9f6df4282af5118bb7c9f8048, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732123379909 2024-11-20T17:23:04,318 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 342b25c854994e18a62e90ad14bc3205, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732123380528 2024-11-20T17:23:04,318 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 9267d789e57348c993000381b38be58e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732123382719 2024-11-20T17:23:04,326 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0e2c16046f40377cebf7837c5395d623#C#compaction#366 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:23:04,326 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:04,326 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T17:23:04,326 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/23441a4ab41d43a9abe57e1d2fe519fe is 50, key is test_row_0/C:col10/1732123382728/Put/seqid=0 2024-11-20T17:23:04,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 
2024-11-20T17:23:04,326 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2837): Flushing 0e2c16046f40377cebf7837c5395d623 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-20T17:23:04,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=A 2024-11-20T17:23:04,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:04,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=B 2024-11-20T17:23:04,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:04,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=C 2024-11-20T17:23:04,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:04,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742260_1436 (size=12139) 2024-11-20T17:23:04,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/b8e60b095a3244fe9a02057470591e42 is 50, key is test_row_0/A:col10/1732123383046/Put/seqid=0 2024-11-20T17:23:04,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742261_1437 (size=12001) 2024-11-20T17:23:04,342 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=87 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/b8e60b095a3244fe9a02057470591e42 2024-11-20T17:23:04,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/51e42221864a4deb85925c68db535dd0 is 50, key is test_row_0/B:col10/1732123383046/Put/seqid=0 2024-11-20T17:23:04,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742262_1438 (size=12001) 2024-11-20T17:23:04,707 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/166013e67f734ca8a5c9b625df08439a as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/166013e67f734ca8a5c9b625df08439a 2024-11-20T17:23:04,711 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0e2c16046f40377cebf7837c5395d623/A of 0e2c16046f40377cebf7837c5395d623 into 166013e67f734ca8a5c9b625df08439a(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:23:04,711 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:23:04,711 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623., storeName=0e2c16046f40377cebf7837c5395d623/A, priority=12, startTime=1732123384285; duration=0sec 2024-11-20T17:23:04,711 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:04,712 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0e2c16046f40377cebf7837c5395d623:A 2024-11-20T17:23:04,734 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/23441a4ab41d43a9abe57e1d2fe519fe as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/23441a4ab41d43a9abe57e1d2fe519fe 2024-11-20T17:23:04,738 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0e2c16046f40377cebf7837c5395d623/C of 0e2c16046f40377cebf7837c5395d623 into 23441a4ab41d43a9abe57e1d2fe519fe(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
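[Editor's note] The records above show the short- and long-compaction threads finishing minor compactions of the A, B, and C stores of region 0e2c16046f40377cebf7837c5395d623, each collapsing four store files into a single ~11.9 K file selected by ExploringCompactionPolicy. For context only, compactions can also be requested explicitly through the HBase Admin API; the following is a minimal, hypothetical sketch (the table name is taken from this log, the connection setup and polling interval are illustrative and not part of the test):

// Sketch: explicitly requesting a compaction of the test table via the Admin API.
// TableName matches the log; everything else here is illustrative.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      admin.majorCompact(table); // queues a major compaction of all stores of the table
      // Poll until the region servers report no compaction in progress for the table.
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(1000);
      }
    }
  }
}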
2024-11-20T17:23:04,738 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:23:04,738 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623., storeName=0e2c16046f40377cebf7837c5395d623/C, priority=12, startTime=1732123384285; duration=0sec 2024-11-20T17:23:04,738 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:04,738 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0e2c16046f40377cebf7837c5395d623:C 2024-11-20T17:23:04,753 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=87 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/51e42221864a4deb85925c68db535dd0 2024-11-20T17:23:04,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/741e782e63b74fe0a64126b5606046a6 is 50, key is test_row_0/C:col10/1732123383046/Put/seqid=0 2024-11-20T17:23:04,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742263_1439 (size=12001) 2024-11-20T17:23:04,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 0e2c16046f40377cebf7837c5395d623 2024-11-20T17:23:04,863 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:04,949 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:04,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123444945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:05,051 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:05,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123445050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:05,163 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=87 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/741e782e63b74fe0a64126b5606046a6 2024-11-20T17:23:05,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/b8e60b095a3244fe9a02057470591e42 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/b8e60b095a3244fe9a02057470591e42 2024-11-20T17:23:05,170 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/b8e60b095a3244fe9a02057470591e42, entries=150, sequenceid=87, filesize=11.7 K 2024-11-20T17:23:05,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/51e42221864a4deb85925c68db535dd0 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/51e42221864a4deb85925c68db535dd0 2024-11-20T17:23:05,172 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:05,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123445171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:05,175 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/51e42221864a4deb85925c68db535dd0, entries=150, sequenceid=87, filesize=11.7 K 2024-11-20T17:23:05,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/741e782e63b74fe0a64126b5606046a6 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/741e782e63b74fe0a64126b5606046a6 2024-11-20T17:23:05,177 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:05,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123445174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:05,179 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/741e782e63b74fe0a64126b5606046a6, entries=150, sequenceid=87, filesize=11.7 K 2024-11-20T17:23:05,180 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 0e2c16046f40377cebf7837c5395d623 in 853ms, sequenceid=87, compaction requested=false 2024-11-20T17:23:05,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2538): Flush status journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:23:05,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 
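[Editor's note] The RegionTooBusyException entries above ("Over memstore limit=512.0 K") mean the region is rejecting mutations because its memstore has grown past the blocking threshold while a flush is still in flight. That threshold is derived from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the 512 K figure suggests this test runs with a deliberately small flush size to exercise that path. A minimal configuration sketch follows; the specific values are illustrative and are assumptions, not the ones used by this test run:

// Sketch: the two settings that determine the per-region memstore blocking limit.
// blocking limit = flush.size * block.multiplier; the values below are illustrative only.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L); // flush at 128 K (default is 128 MB)
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // block writes at 4x the flush size
    // With these hypothetical values a region would start throwing RegionTooBusyException
    // once its memstore reaches 512 K, matching the limit reported in this log.
    long limit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
    System.out.println("blocking limit = " + limit + " bytes");
  }
}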
2024-11-20T17:23:05,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=133 2024-11-20T17:23:05,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=133 2024-11-20T17:23:05,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 0e2c16046f40377cebf7837c5395d623 2024-11-20T17:23:05,182 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0e2c16046f40377cebf7837c5395d623 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-20T17:23:05,182 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-11-20T17:23:05,182 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0740 sec 2024-11-20T17:23:05,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=A 2024-11-20T17:23:05,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:05,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=B 2024-11-20T17:23:05,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:05,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=C 2024-11-20T17:23:05,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:05,184 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees in 2.0780 sec 2024-11-20T17:23:05,188 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/64845cdadf2a45feac6a331f176e2372 is 50, key is test_row_0/A:col10/1732123384934/Put/seqid=0 2024-11-20T17:23:05,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742264_1440 (size=16681) 2024-11-20T17:23:05,196 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:05,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45576 deadline: 1732123445196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:05,196 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:05,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123445196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:05,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T17:23:05,210 INFO [Thread-1898 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 132 completed 2024-11-20T17:23:05,211 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:23:05,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-11-20T17:23:05,212 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:23:05,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T17:23:05,213 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:23:05,213 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:23:05,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:05,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123445253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:05,300 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:05,300 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:05,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45576 deadline: 1732123445297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:05,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123445297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:05,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T17:23:05,364 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:05,364 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-20T17:23:05,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:05,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:05,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:05,365 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:05,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:05,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:05,505 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:05,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123445502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:05,505 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:05,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45576 deadline: 1732123445503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:05,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T17:23:05,522 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:05,523 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-20T17:23:05,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:05,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:05,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:05,523 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
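[Editor's note] The pid=134/135 entries show the master's FlushTableProcedure fanning a FlushRegionProcedure out to the region server, which answers "Unable to complete flush ... as already flushing" and is re-dispatched until the in-progress flush finishes. On the client side this whole exchange is driven by a single Admin.flush call; a hypothetical standalone sketch is below (the test harness issues the equivalent call internally, and the connection setup here is illustrative):

// Sketch: the client-side call that produces the FlushTableProcedure seen in this log.
// Table name matches the log; connection details are illustrative.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Synchronous: returns once the master-side flush procedure (and its per-region
      // subprocedures) has completed for every region of the table.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}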
2024-11-20T17:23:05,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:05,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:05,559 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:05,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123445558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:05,592 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/64845cdadf2a45feac6a331f176e2372 2024-11-20T17:23:05,600 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/f39c3155ae2a42cd85f85d5f8d844aac is 50, key is test_row_0/B:col10/1732123384934/Put/seqid=0 2024-11-20T17:23:05,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742265_1441 (size=12001) 2024-11-20T17:23:05,675 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:05,675 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-20T17:23:05,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting 
region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:05,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:05,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:05,676 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:05,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:23:05,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:05,809 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:05,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123445807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:05,809 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:05,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45576 deadline: 1732123445807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:05,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T17:23:05,828 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:05,828 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-20T17:23:05,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:05,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:05,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:05,829 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:23:05,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:05,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:05,981 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:05,981 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-20T17:23:05,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:05,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:05,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:05,982 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:05,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:05,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:23:06,004 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/f39c3155ae2a42cd85f85d5f8d844aac 2024-11-20T17:23:06,011 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/331a1b1857ac45a0aa03d60361a0c187 is 50, key is test_row_0/C:col10/1732123384934/Put/seqid=0 2024-11-20T17:23:06,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742266_1442 (size=12001) 2024-11-20T17:23:06,064 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:06,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123446061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:06,133 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:06,134 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-20T17:23:06,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 
2024-11-20T17:23:06,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:06,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:06,134 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:06,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:23:06,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:06,286 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:06,287 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-20T17:23:06,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:06,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 
as already flushing 2024-11-20T17:23:06,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:06,287 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:06,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:06,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:06,315 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:06,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45576 deadline: 1732123446311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:06,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T17:23:06,315 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:06,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123446313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:06,415 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/331a1b1857ac45a0aa03d60361a0c187 2024-11-20T17:23:06,420 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/64845cdadf2a45feac6a331f176e2372 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/64845cdadf2a45feac6a331f176e2372 2024-11-20T17:23:06,424 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/64845cdadf2a45feac6a331f176e2372, entries=250, sequenceid=117, filesize=16.3 K 2024-11-20T17:23:06,425 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/f39c3155ae2a42cd85f85d5f8d844aac as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/f39c3155ae2a42cd85f85d5f8d844aac 2024-11-20T17:23:06,429 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/f39c3155ae2a42cd85f85d5f8d844aac, entries=150, sequenceid=117, filesize=11.7 K 2024-11-20T17:23:06,429 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/331a1b1857ac45a0aa03d60361a0c187 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/331a1b1857ac45a0aa03d60361a0c187 2024-11-20T17:23:06,433 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/331a1b1857ac45a0aa03d60361a0c187, entries=150, sequenceid=117, filesize=11.7 K 2024-11-20T17:23:06,434 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 0e2c16046f40377cebf7837c5395d623 in 1252ms, sequenceid=117, compaction requested=true 2024-11-20T17:23:06,434 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:23:06,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0e2c16046f40377cebf7837c5395d623:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:23:06,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:06,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0e2c16046f40377cebf7837c5395d623:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:23:06,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:06,434 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:23:06,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0e2c16046f40377cebf7837c5395d623:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:23:06,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:23:06,434 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:23:06,435 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:23:06,435 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 0e2c16046f40377cebf7837c5395d623/B is initiating minor compaction (all files) 2024-11-20T17:23:06,436 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40821 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:23:06,436 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): 0e2c16046f40377cebf7837c5395d623/A is initiating minor compaction (all files) 2024-11-20T17:23:06,436 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0e2c16046f40377cebf7837c5395d623/B in TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 
2024-11-20T17:23:06,436 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/eb04859b6f4842548a02c96f16181b19, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/51e42221864a4deb85925c68db535dd0, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/f39c3155ae2a42cd85f85d5f8d844aac] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp, totalSize=35.3 K 2024-11-20T17:23:06,436 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0e2c16046f40377cebf7837c5395d623/A in TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:06,436 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/166013e67f734ca8a5c9b625df08439a, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/b8e60b095a3244fe9a02057470591e42, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/64845cdadf2a45feac6a331f176e2372] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp, totalSize=39.9 K 2024-11-20T17:23:06,437 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 166013e67f734ca8a5c9b625df08439a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732123382719 2024-11-20T17:23:06,437 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting eb04859b6f4842548a02c96f16181b19, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732123382719 2024-11-20T17:23:06,437 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 51e42221864a4deb85925c68db535dd0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1732123383042 2024-11-20T17:23:06,437 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting b8e60b095a3244fe9a02057470591e42, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1732123383042 2024-11-20T17:23:06,437 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting f39c3155ae2a42cd85f85d5f8d844aac, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732123384934 2024-11-20T17:23:06,437 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 64845cdadf2a45feac6a331f176e2372, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732123384934 
2024-11-20T17:23:06,439 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:06,440 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-20T17:23:06,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:06,440 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing 0e2c16046f40377cebf7837c5395d623 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-20T17:23:06,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=A 2024-11-20T17:23:06,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:06,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=B 2024-11-20T17:23:06,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:06,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=C 2024-11-20T17:23:06,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:06,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/b52e6c1d1d4b4eba802018b09973a781 is 50, key is test_row_0/A:col10/1732123385194/Put/seqid=0 2024-11-20T17:23:06,446 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0e2c16046f40377cebf7837c5395d623#B#compaction#374 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:23:06,447 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/af5b43f7037e4ca786db7880c69ac78d is 50, key is test_row_0/B:col10/1732123384934/Put/seqid=0 2024-11-20T17:23:06,454 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0e2c16046f40377cebf7837c5395d623#A#compaction#375 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:23:06,454 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/00834fbe3e8746b6a1464196427a96c9 is 50, key is test_row_0/A:col10/1732123384934/Put/seqid=0 2024-11-20T17:23:06,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742267_1443 (size=12001) 2024-11-20T17:23:06,462 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/b52e6c1d1d4b4eba802018b09973a781 2024-11-20T17:23:06,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742268_1444 (size=12241) 2024-11-20T17:23:06,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742269_1445 (size=12241) 2024-11-20T17:23:06,468 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/af5b43f7037e4ca786db7880c69ac78d as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/af5b43f7037e4ca786db7880c69ac78d 2024-11-20T17:23:06,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/571bd10e4e7d440cb5ba8dc05af6cdec is 50, key is test_row_0/B:col10/1732123385194/Put/seqid=0 2024-11-20T17:23:06,475 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0e2c16046f40377cebf7837c5395d623/B of 0e2c16046f40377cebf7837c5395d623 into af5b43f7037e4ca786db7880c69ac78d(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:23:06,475 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:23:06,475 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623., storeName=0e2c16046f40377cebf7837c5395d623/B, priority=13, startTime=1732123386434; duration=0sec 2024-11-20T17:23:06,475 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:23:06,475 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0e2c16046f40377cebf7837c5395d623:B 2024-11-20T17:23:06,475 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:23:06,476 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:23:06,476 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 0e2c16046f40377cebf7837c5395d623/C is initiating minor compaction (all files) 2024-11-20T17:23:06,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742270_1446 (size=12001) 2024-11-20T17:23:06,476 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0e2c16046f40377cebf7837c5395d623/C in TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 
2024-11-20T17:23:06,477 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/23441a4ab41d43a9abe57e1d2fe519fe, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/741e782e63b74fe0a64126b5606046a6, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/331a1b1857ac45a0aa03d60361a0c187] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp, totalSize=35.3 K 2024-11-20T17:23:06,477 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 23441a4ab41d43a9abe57e1d2fe519fe, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732123382719 2024-11-20T17:23:06,478 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 741e782e63b74fe0a64126b5606046a6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1732123383042 2024-11-20T17:23:06,478 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 331a1b1857ac45a0aa03d60361a0c187, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732123384934 2024-11-20T17:23:06,480 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/571bd10e4e7d440cb5ba8dc05af6cdec 2024-11-20T17:23:06,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/9cc6749750d34f62bb5b89e08fb66e97 is 50, key is test_row_0/C:col10/1732123385194/Put/seqid=0 2024-11-20T17:23:06,488 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0e2c16046f40377cebf7837c5395d623#C#compaction#378 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:23:06,489 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/b3fb30ee84e24412a785428bbbff98d0 is 50, key is test_row_0/C:col10/1732123384934/Put/seqid=0 2024-11-20T17:23:06,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742271_1447 (size=12001) 2024-11-20T17:23:06,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742272_1448 (size=12241) 2024-11-20T17:23:06,869 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/00834fbe3e8746b6a1464196427a96c9 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/00834fbe3e8746b6a1464196427a96c9 2024-11-20T17:23:06,873 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0e2c16046f40377cebf7837c5395d623/A of 0e2c16046f40377cebf7837c5395d623 into 00834fbe3e8746b6a1464196427a96c9(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:23:06,873 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:23:06,873 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623., storeName=0e2c16046f40377cebf7837c5395d623/A, priority=13, startTime=1732123386434; duration=0sec 2024-11-20T17:23:06,873 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:06,873 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0e2c16046f40377cebf7837c5395d623:A 2024-11-20T17:23:06,895 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/9cc6749750d34f62bb5b89e08fb66e97 2024-11-20T17:23:06,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/b52e6c1d1d4b4eba802018b09973a781 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/b52e6c1d1d4b4eba802018b09973a781 2024-11-20T17:23:06,903 DEBUG 
[RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/b3fb30ee84e24412a785428bbbff98d0 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/b3fb30ee84e24412a785428bbbff98d0 2024-11-20T17:23:06,904 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/b52e6c1d1d4b4eba802018b09973a781, entries=150, sequenceid=125, filesize=11.7 K 2024-11-20T17:23:06,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/571bd10e4e7d440cb5ba8dc05af6cdec as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/571bd10e4e7d440cb5ba8dc05af6cdec 2024-11-20T17:23:06,907 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0e2c16046f40377cebf7837c5395d623/C of 0e2c16046f40377cebf7837c5395d623 into b3fb30ee84e24412a785428bbbff98d0(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:23:06,908 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:23:06,908 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623., storeName=0e2c16046f40377cebf7837c5395d623/C, priority=13, startTime=1732123386434; duration=0sec 2024-11-20T17:23:06,908 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:06,908 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0e2c16046f40377cebf7837c5395d623:C 2024-11-20T17:23:06,908 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/571bd10e4e7d440cb5ba8dc05af6cdec, entries=150, sequenceid=125, filesize=11.7 K 2024-11-20T17:23:06,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/9cc6749750d34f62bb5b89e08fb66e97 as 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/9cc6749750d34f62bb5b89e08fb66e97 2024-11-20T17:23:06,913 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/9cc6749750d34f62bb5b89e08fb66e97, entries=150, sequenceid=125, filesize=11.7 K 2024-11-20T17:23:06,913 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 0e2c16046f40377cebf7837c5395d623 in 473ms, sequenceid=125, compaction requested=false 2024-11-20T17:23:06,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2538): Flush status journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:23:06,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:06,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135 2024-11-20T17:23:06,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=135 2024-11-20T17:23:06,916 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-11-20T17:23:06,916 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7010 sec 2024-11-20T17:23:06,917 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 1.7050 sec 2024-11-20T17:23:07,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 0e2c16046f40377cebf7837c5395d623 2024-11-20T17:23:07,089 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0e2c16046f40377cebf7837c5395d623 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T17:23:07,089 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=A 2024-11-20T17:23:07,089 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:07,089 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=B 2024-11-20T17:23:07,089 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:07,089 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=C 2024-11-20T17:23:07,089 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:07,094 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/55a432ef572d40dca4621915d1abd7d1 is 50, key is test_row_0/A:col10/1732123387084/Put/seqid=0 2024-11-20T17:23:07,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742273_1449 (size=14541) 2024-11-20T17:23:07,196 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:07,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123447193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:07,200 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:07,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123447193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:07,200 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:07,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123447196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:07,201 DEBUG [Thread-1892 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4158 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623., hostname=d514dc944523,40121,1732123262111, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:23:07,299 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:07,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123447297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:07,304 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:07,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123447301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:07,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T17:23:07,316 INFO [Thread-1898 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-11-20T17:23:07,317 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:23:07,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees 2024-11-20T17:23:07,319 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:23:07,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T17:23:07,319 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:23:07,320 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:23:07,321 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:07,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123447317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:07,326 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:07,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45576 deadline: 1732123447325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:07,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T17:23:07,471 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:07,472 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-20T17:23:07,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:07,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:07,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:07,472 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:23:07,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:07,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:07,498 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=139 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/55a432ef572d40dca4621915d1abd7d1 2024-11-20T17:23:07,505 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:07,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123447502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:07,505 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/892f5a3a66924cdcb22c2f786d2a439e is 50, key is test_row_0/B:col10/1732123387084/Put/seqid=0 2024-11-20T17:23:07,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742274_1450 (size=12151) 2024-11-20T17:23:07,510 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:07,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123447507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:07,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T17:23:07,624 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:07,624 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-20T17:23:07,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:07,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:07,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:07,625 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:07,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:07,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:07,777 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:07,777 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-20T17:23:07,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:07,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:07,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:07,778 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:07,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:07,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:07,812 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:07,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123447808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:07,816 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:07,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123447811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:07,909 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=139 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/892f5a3a66924cdcb22c2f786d2a439e 2024-11-20T17:23:07,916 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/0e6df415fb0b42b38789251b6f748b1f is 50, key is test_row_0/C:col10/1732123387084/Put/seqid=0 2024-11-20T17:23:07,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742275_1451 (size=12151) 2024-11-20T17:23:07,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T17:23:07,930 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:07,930 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-20T17:23:07,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:07,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:07,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 
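The stretch above shows the master re-dispatching the flush procedure (pid=137) roughly every 150 ms while the region keeps answering "NOT flushing ... as already flushing", so every attempt fails with the same IOException until the in-flight flush completes. A minimal Java sketch of that retry-until-the-flush-drains loop, using invented names (FlushRetrySketch, requestFlush, alreadyFlushing) rather than the real HBase procedure classes:

import java.io.IOException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

public class FlushRetrySketch {
    // Hypothetical stand-in for the "already flushing" state the region keeps reporting.
    static final AtomicBoolean alreadyFlushing = new AtomicBoolean(true);

    // Models one FlushRegionCallable attempt: it cannot start a second flush, so it fails.
    static void requestFlush() throws IOException {
        if (alreadyFlushing.get()) {
            throw new IOException("Unable to complete flush: region already flushing");
        }
        System.out.println("flush started");
    }

    public static void main(String[] args) throws Exception {
        // The coordinator keeps re-dispatching the same procedure until an attempt succeeds,
        // which is the repeating "Executing remote procedure ... pid=137" pattern in the log.
        for (int attempt = 1; attempt <= 5; attempt++) {
            try {
                requestFlush();
                break;
            } catch (IOException e) {
                System.out.println("attempt " + attempt + " failed: " + e.getMessage());
                TimeUnit.MILLISECONDS.sleep(150); // the log shows roughly 150 ms between attempts
                if (attempt == 3) {
                    alreadyFlushing.set(false); // the first flush eventually completes
                }
            }
        }
    }
}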
2024-11-20T17:23:07,931 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:07,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:07,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:08,083 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:08,083 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-20T17:23:08,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:08,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:08,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:08,083 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:08,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:08,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:08,235 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:08,236 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-20T17:23:08,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:08,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:08,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:08,236 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:08,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:08,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:08,317 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:08,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123448313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:08,320 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=139 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/0e6df415fb0b42b38789251b6f748b1f 2024-11-20T17:23:08,324 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/55a432ef572d40dca4621915d1abd7d1 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/55a432ef572d40dca4621915d1abd7d1 2024-11-20T17:23:08,323 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:08,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123448319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:08,327 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/55a432ef572d40dca4621915d1abd7d1, entries=200, sequenceid=139, filesize=14.2 K 2024-11-20T17:23:08,328 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/892f5a3a66924cdcb22c2f786d2a439e as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/892f5a3a66924cdcb22c2f786d2a439e 2024-11-20T17:23:08,331 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/892f5a3a66924cdcb22c2f786d2a439e, entries=150, sequenceid=139, filesize=11.9 K 2024-11-20T17:23:08,332 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/0e6df415fb0b42b38789251b6f748b1f as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/0e6df415fb0b42b38789251b6f748b1f 2024-11-20T17:23:08,335 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/0e6df415fb0b42b38789251b6f748b1f, entries=150, sequenceid=139, filesize=11.9 K 2024-11-20T17:23:08,336 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 0e2c16046f40377cebf7837c5395d623 in 1247ms, sequenceid=139, compaction requested=true 2024-11-20T17:23:08,336 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:23:08,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0e2c16046f40377cebf7837c5395d623:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:23:08,336 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:08,336 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:23:08,336 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:23:08,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0e2c16046f40377cebf7837c5395d623:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:23:08,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:08,337 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0e2c16046f40377cebf7837c5395d623:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:23:08,337 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:23:08,337 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38783 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:23:08,337 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:23:08,337 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 0e2c16046f40377cebf7837c5395d623/B is initiating minor compaction (all files) 2024-11-20T17:23:08,337 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): 0e2c16046f40377cebf7837c5395d623/A is initiating minor compaction (all files) 2024-11-20T17:23:08,337 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0e2c16046f40377cebf7837c5395d623/B in TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:08,337 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0e2c16046f40377cebf7837c5395d623/A in TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 
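The selection entries above report "Selecting compaction from 3 store files, 0 compacting, 3 eligible" and an exploring policy that settled on 3 files of size 38783 after considering 1 permutation. A simplified Java sketch of that kind of contiguous-window candidate selection, assuming a toy policy that just minimizes total size over windows meeting a minimum file count (the real ExploringCompactionPolicy also applies ratio and size limits); the byte values are invented and merely chosen to add up to the logged total:

import java.util.Arrays;
import java.util.List;

public class CompactionSelectionSketch {
    static List<Long> selectFiles(long[] fileSizes, int minFiles, int maxFiles) {
        List<Long> best = List.of();
        long bestTotal = Long.MAX_VALUE;
        int permutations = 0;
        // Examine every contiguous window of eligible files within the size bounds.
        for (int start = 0; start < fileSizes.length; start++) {
            for (int end = start + minFiles; end <= Math.min(fileSizes.length, start + maxFiles); end++) {
                permutations++;
                long total = 0;
                for (int i = start; i < end; i++) total += fileSizes[i];
                if (total < bestTotal) {
                    bestTotal = total;
                    best = Arrays.stream(Arrays.copyOfRange(fileSizes, start, end)).boxed().toList();
                }
            }
        }
        System.out.println("considered " + permutations + " candidate window(s), total=" + bestTotal);
        return best;
    }

    public static void main(String[] args) {
        // Three eligible HFiles, roughly the sizes reported for store A above (12.0 K, 11.7 K, 14.2 K).
        long[] sizes = {12288, 11980, 14515};
        System.out.println("selected: " + selectFiles(sizes, 3, 10));
    }
}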
2024-11-20T17:23:08,337 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/00834fbe3e8746b6a1464196427a96c9, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/b52e6c1d1d4b4eba802018b09973a781, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/55a432ef572d40dca4621915d1abd7d1] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp, totalSize=37.9 K 2024-11-20T17:23:08,337 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/af5b43f7037e4ca786db7880c69ac78d, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/571bd10e4e7d440cb5ba8dc05af6cdec, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/892f5a3a66924cdcb22c2f786d2a439e] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp, totalSize=35.5 K 2024-11-20T17:23:08,338 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting af5b43f7037e4ca786db7880c69ac78d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732123384934 2024-11-20T17:23:08,338 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 00834fbe3e8746b6a1464196427a96c9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732123384934 2024-11-20T17:23:08,338 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 571bd10e4e7d440cb5ba8dc05af6cdec, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=125, earliestPutTs=1732123385190 2024-11-20T17:23:08,338 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting b52e6c1d1d4b4eba802018b09973a781, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=125, earliestPutTs=1732123385190 2024-11-20T17:23:08,338 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 892f5a3a66924cdcb22c2f786d2a439e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=139, earliestPutTs=1732123387081 2024-11-20T17:23:08,338 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 55a432ef572d40dca4621915d1abd7d1, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=139, earliestPutTs=1732123387081 2024-11-20T17:23:08,345 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0e2c16046f40377cebf7837c5395d623#B#compaction#383 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:23:08,346 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0e2c16046f40377cebf7837c5395d623#A#compaction#382 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:23:08,346 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/126bce125320480095820c9127cf9d96 is 50, key is test_row_0/B:col10/1732123387084/Put/seqid=0 2024-11-20T17:23:08,346 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/ee7dcff6f3d5428eacc82687945d4946 is 50, key is test_row_0/A:col10/1732123387084/Put/seqid=0 2024-11-20T17:23:08,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742276_1452 (size=12493) 2024-11-20T17:23:08,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742277_1453 (size=12493) 2024-11-20T17:23:08,388 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:08,388 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-20T17:23:08,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 
2024-11-20T17:23:08,389 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2837): Flushing 0e2c16046f40377cebf7837c5395d623 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T17:23:08,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=A 2024-11-20T17:23:08,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:08,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=B 2024-11-20T17:23:08,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:08,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=C 2024-11-20T17:23:08,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:08,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/44e04bc8484d491ebe5bc6be921e44ca is 50, key is test_row_0/A:col10/1732123387195/Put/seqid=0 2024-11-20T17:23:08,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742278_1454 (size=12151) 2024-11-20T17:23:08,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T17:23:08,758 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/ee7dcff6f3d5428eacc82687945d4946 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/ee7dcff6f3d5428eacc82687945d4946 2024-11-20T17:23:08,762 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0e2c16046f40377cebf7837c5395d623/A of 0e2c16046f40377cebf7837c5395d623 into ee7dcff6f3d5428eacc82687945d4946(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
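The flush and compaction entries around here write each new file under the region's .tmp directory first ("Flushed memstore data ... to=.../.tmp/A/...") and only then commit it into the store ("Committing .../.tmp/A/55a432... as .../A/55a432..."). A minimal local-filesystem sketch of that write-then-rename commit, with java.nio.file standing in for HDFS and a toy directory layout:

import java.io.IOException;
import java.nio.file.*;

public class TmpCommitSketch {
    static Path flushToTmp(Path tmpDir, String fileName, byte[] payload) throws IOException {
        Files.createDirectories(tmpDir);
        Path tmpFile = tmpDir.resolve(fileName);
        Files.write(tmpFile, payload);           // fully materialize the new file first
        return tmpFile;
    }

    static Path commit(Path tmpFile, Path storeDir) throws IOException {
        Files.createDirectories(storeDir);
        Path target = storeDir.resolve(tmpFile.getFileName());
        // ATOMIC_MOVE makes the file appear in the live store directory in one step.
        return Files.move(tmpFile, target, StandardCopyOption.ATOMIC_MOVE);
    }

    public static void main(String[] args) throws IOException {
        Path region = Files.createTempDirectory("region-0e2c1604");
        Path tmp = flushToTmp(region.resolve(".tmp/A"), "55a432ef-demo", "flushed cells".getBytes());
        Path committed = commit(tmp, region.resolve("A"));
        System.out.println("committed " + committed);
    }
}

Because the move happens only after the file is fully written, readers scanning the store directory never observe a partially written file.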
2024-11-20T17:23:08,762 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:23:08,762 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623., storeName=0e2c16046f40377cebf7837c5395d623/A, priority=13, startTime=1732123388336; duration=0sec 2024-11-20T17:23:08,763 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:23:08,763 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0e2c16046f40377cebf7837c5395d623:A 2024-11-20T17:23:08,763 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:23:08,764 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:23:08,764 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): 0e2c16046f40377cebf7837c5395d623/C is initiating minor compaction (all files) 2024-11-20T17:23:08,765 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0e2c16046f40377cebf7837c5395d623/C in TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:08,765 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/b3fb30ee84e24412a785428bbbff98d0, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/9cc6749750d34f62bb5b89e08fb66e97, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/0e6df415fb0b42b38789251b6f748b1f] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp, totalSize=35.5 K 2024-11-20T17:23:08,765 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting b3fb30ee84e24412a785428bbbff98d0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732123384934 2024-11-20T17:23:08,766 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9cc6749750d34f62bb5b89e08fb66e97, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=125, earliestPutTs=1732123385190 2024-11-20T17:23:08,766 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0e6df415fb0b42b38789251b6f748b1f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=139, earliestPutTs=1732123387081 2024-11-20T17:23:08,768 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/126bce125320480095820c9127cf9d96 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/126bce125320480095820c9127cf9d96 2024-11-20T17:23:08,772 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0e2c16046f40377cebf7837c5395d623/B of 0e2c16046f40377cebf7837c5395d623 into 126bce125320480095820c9127cf9d96(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:23:08,772 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:23:08,772 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623., storeName=0e2c16046f40377cebf7837c5395d623/B, priority=13, startTime=1732123388336; duration=0sec 2024-11-20T17:23:08,772 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:08,772 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0e2c16046f40377cebf7837c5395d623:B 2024-11-20T17:23:08,781 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0e2c16046f40377cebf7837c5395d623#C#compaction#385 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:23:08,782 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/d7304a2fb97e47ea8da6efeaf3b8361e is 50, key is test_row_0/C:col10/1732123387084/Put/seqid=0 2024-11-20T17:23:08,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742279_1455 (size=12493) 2024-11-20T17:23:08,795 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/d7304a2fb97e47ea8da6efeaf3b8361e as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/d7304a2fb97e47ea8da6efeaf3b8361e 2024-11-20T17:23:08,801 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/44e04bc8484d491ebe5bc6be921e44ca 2024-11-20T17:23:08,801 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0e2c16046f40377cebf7837c5395d623/C of 0e2c16046f40377cebf7837c5395d623 into d7304a2fb97e47ea8da6efeaf3b8361e(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
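The repeated "Region is too busy ... Over memstore limit=512.0 K" warnings above and below are retriable rejections: the region refuses new mutations until a flush drains the memstore back under the blocking limit. A hedged sketch of how a caller might back off and retry on such a rejection; the RegionTooBusyException class and Write interface here are local placeholders, not the HBase client API:

import java.util.concurrent.TimeUnit;

public class BusyRegionRetrySketch {
    static class RegionTooBusyException extends Exception {
        RegionTooBusyException(String msg) { super(msg); }
    }

    interface Write { void run() throws RegionTooBusyException; }

    static void putWithBackoff(Write write, int maxAttempts) throws Exception {
        long backoffMs = 100;
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            try {
                write.run();
                return;
            } catch (RegionTooBusyException e) {
                if (attempt == maxAttempts) throw e;
                // Exponential backoff gives the in-progress flush time to drain the memstore.
                TimeUnit.MILLISECONDS.sleep(backoffMs);
                backoffMs = Math.min(backoffMs * 2, 5_000);
            }
        }
    }

    public static void main(String[] args) throws Exception {
        int[] calls = {0};
        putWithBackoff(() -> {
            if (++calls[0] < 3) throw new RegionTooBusyException("Over memstore limit=512.0 K");
            System.out.println("mutation accepted on attempt " + calls[0]);
        }, 5);
    }
}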
2024-11-20T17:23:08,801 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:23:08,801 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623., storeName=0e2c16046f40377cebf7837c5395d623/C, priority=13, startTime=1732123388337; duration=0sec 2024-11-20T17:23:08,801 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:08,801 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0e2c16046f40377cebf7837c5395d623:C 2024-11-20T17:23:08,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/f1955a9485f84a088cb4825d901a00d5 is 50, key is test_row_0/B:col10/1732123387195/Put/seqid=0 2024-11-20T17:23:08,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742280_1456 (size=12151) 2024-11-20T17:23:09,216 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/f1955a9485f84a088cb4825d901a00d5 2024-11-20T17:23:09,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/85f3e5c460f4462093122a62dd7ec795 is 50, key is test_row_0/C:col10/1732123387195/Put/seqid=0 2024-11-20T17:23:09,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742281_1457 (size=12151) 2024-11-20T17:23:09,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 0e2c16046f40377cebf7837c5395d623 2024-11-20T17:23:09,326 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:09,336 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:09,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123449333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:09,336 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:09,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123449333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:09,338 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:09,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123449334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:09,338 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:09,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45576 deadline: 1732123449335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:09,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T17:23:09,440 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:09,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123449437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:09,440 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:09,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123449437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:09,441 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:09,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123449439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:09,441 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:09,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45576 deadline: 1732123449439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:09,627 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/85f3e5c460f4462093122a62dd7ec795 2024-11-20T17:23:09,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/44e04bc8484d491ebe5bc6be921e44ca as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/44e04bc8484d491ebe5bc6be921e44ca 2024-11-20T17:23:09,635 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/44e04bc8484d491ebe5bc6be921e44ca, entries=150, sequenceid=164, filesize=11.9 K 2024-11-20T17:23:09,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/f1955a9485f84a088cb4825d901a00d5 as 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/f1955a9485f84a088cb4825d901a00d5 2024-11-20T17:23:09,642 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/f1955a9485f84a088cb4825d901a00d5, entries=150, sequenceid=164, filesize=11.9 K 2024-11-20T17:23:09,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/85f3e5c460f4462093122a62dd7ec795 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/85f3e5c460f4462093122a62dd7ec795 2024-11-20T17:23:09,645 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:09,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123449641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:09,645 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:09,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123449642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:09,645 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:09,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45576 deadline: 1732123449642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:09,646 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:09,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123449642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:09,647 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/85f3e5c460f4462093122a62dd7ec795, entries=150, sequenceid=164, filesize=11.9 K 2024-11-20T17:23:09,647 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 0e2c16046f40377cebf7837c5395d623 in 1258ms, sequenceid=164, compaction requested=false 2024-11-20T17:23:09,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2538): Flush status journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:23:09,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 
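The RegionTooBusyException entries surrounding this flush all come from HRegion.checkResources() refusing puts while the region's memstore sits above its blocking threshold; once the flush tracked by pid=137 finishes (the ~147.60 KB flush recorded just above), writes are accepted again. As a rough illustration only, and not the test's actual code, the sketch below shows the two server-side settings that determine a blocking limit like the "512.0 K" in these messages, plus a hypothetical client-side backoff around Table.put(); the table, row, family and value names, the retry counts, and the putWithBackoff helper are assumptions made for the example.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MemstoreBackoffSketch {

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Server-side knobs, shown only for reference (they must be set on the region
    // servers, e.g. in hbase-site.xml or a mini-cluster Configuration, not on a
    // client conf): updates are blocked once a region's memstore exceeds
    //   hbase.hregion.memstore.flush.size * hbase.hregion.memstore.block.multiplier.
    // For example, a 128 KB flush size with the default multiplier of 4 would match
    // the "Over memstore limit=512.0 K" threshold in these messages.
    // conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      putWithBackoff(table, put);
    }
  }

  // Hypothetical application-level backoff. The HBase client normally retries
  // RegionTooBusyException on its own (governed by hbase.client.retries.number and
  // hbase.client.pause); this loop only adds a coarse outer retry for the case
  // where those retries are exhausted and the put surfaces as an IOException.
  static void putWithBackoff(Table table, Put put) throws IOException, InterruptedException {
    long backoffMs = 100;
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return; // accepted once the flush has drained the memstore below the limit
      } catch (IOException busy) {
        if (attempt >= 10) {
          throw busy;
        }
        Thread.sleep(backoffMs);
        backoffMs = Math.min(backoffMs * 2, 5_000L);
      }
    }
  }
}

In practice the client's built-in retry settings usually absorb these rejections on their own, which is why the workload in this log keeps issuing mutations despite the repeated warnings; the sketch just makes that waiting explicit.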
2024-11-20T17:23:09,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-11-20T17:23:09,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=137 2024-11-20T17:23:09,650 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-11-20T17:23:09,650 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3280 sec 2024-11-20T17:23:09,651 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees in 2.3330 sec 2024-11-20T17:23:09,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 0e2c16046f40377cebf7837c5395d623 2024-11-20T17:23:09,949 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0e2c16046f40377cebf7837c5395d623 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T17:23:09,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=A 2024-11-20T17:23:09,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:09,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=B 2024-11-20T17:23:09,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:09,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=C 2024-11-20T17:23:09,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:09,954 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/d5947b4bcb04454db5d4bfaba19cc806 is 50, key is test_row_0/A:col10/1732123389334/Put/seqid=0 2024-11-20T17:23:09,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742282_1458 (size=12151) 2024-11-20T17:23:09,983 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:09,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123449978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:09,984 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:09,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123449979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:09,985 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:09,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45576 deadline: 1732123449979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:09,987 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:09,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123449983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:10,088 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:10,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123450084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:10,091 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:10,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123450085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:10,092 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:10,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45576 deadline: 1732123450086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:10,092 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:10,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123450088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:10,291 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:10,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123450289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:10,297 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:10,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123450293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:10,297 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:10,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45576 deadline: 1732123450293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:10,297 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:10,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123450294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:10,359 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=180 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/d5947b4bcb04454db5d4bfaba19cc806 2024-11-20T17:23:10,366 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/116def9dbf9c473791a64cc8971cf985 is 50, key is test_row_0/B:col10/1732123389334/Put/seqid=0 2024-11-20T17:23:10,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742283_1459 (size=12151) 2024-11-20T17:23:10,596 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:10,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123450594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:10,601 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:10,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123450598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:10,602 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:10,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45576 deadline: 1732123450598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:10,602 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:10,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123450598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:10,770 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=180 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/116def9dbf9c473791a64cc8971cf985 2024-11-20T17:23:10,777 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/d26e93f616f245a3b64c6091026e864e is 50, key is test_row_0/C:col10/1732123389334/Put/seqid=0 2024-11-20T17:23:10,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742284_1460 (size=12151) 2024-11-20T17:23:11,105 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:11,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123451101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:11,106 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:11,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123451103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:11,107 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:11,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45576 deadline: 1732123451105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:11,110 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:11,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123451107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:11,181 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=180 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/d26e93f616f245a3b64c6091026e864e 2024-11-20T17:23:11,185 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/d5947b4bcb04454db5d4bfaba19cc806 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/d5947b4bcb04454db5d4bfaba19cc806 2024-11-20T17:23:11,188 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/d5947b4bcb04454db5d4bfaba19cc806, entries=150, sequenceid=180, filesize=11.9 K 2024-11-20T17:23:11,189 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/116def9dbf9c473791a64cc8971cf985 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/116def9dbf9c473791a64cc8971cf985 2024-11-20T17:23:11,192 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/116def9dbf9c473791a64cc8971cf985, entries=150, sequenceid=180, filesize=11.9 K 2024-11-20T17:23:11,193 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/d26e93f616f245a3b64c6091026e864e as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/d26e93f616f245a3b64c6091026e864e 2024-11-20T17:23:11,196 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/d26e93f616f245a3b64c6091026e864e, entries=150, sequenceid=180, filesize=11.9 K 2024-11-20T17:23:11,197 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 0e2c16046f40377cebf7837c5395d623 in 1248ms, sequenceid=180, compaction requested=true 2024-11-20T17:23:11,197 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:23:11,197 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0e2c16046f40377cebf7837c5395d623:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:23:11,197 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:11,197 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0e2c16046f40377cebf7837c5395d623:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:23:11,197 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:11,197 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0e2c16046f40377cebf7837c5395d623:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:23:11,197 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:23:11,197 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:23:11,197 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:23:11,198 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:23:11,198 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:23:11,198 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): 0e2c16046f40377cebf7837c5395d623/A is initiating minor compaction (all files) 2024-11-20T17:23:11,198 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 0e2c16046f40377cebf7837c5395d623/B is initiating minor compaction (all files) 2024-11-20T17:23:11,198 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0e2c16046f40377cebf7837c5395d623/A in TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 
2024-11-20T17:23:11,198 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0e2c16046f40377cebf7837c5395d623/B in TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:11,198 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/ee7dcff6f3d5428eacc82687945d4946, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/44e04bc8484d491ebe5bc6be921e44ca, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/d5947b4bcb04454db5d4bfaba19cc806] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp, totalSize=35.9 K 2024-11-20T17:23:11,198 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/126bce125320480095820c9127cf9d96, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/f1955a9485f84a088cb4825d901a00d5, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/116def9dbf9c473791a64cc8971cf985] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp, totalSize=35.9 K 2024-11-20T17:23:11,198 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting ee7dcff6f3d5428eacc82687945d4946, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=139, earliestPutTs=1732123387081 2024-11-20T17:23:11,198 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 126bce125320480095820c9127cf9d96, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=139, earliestPutTs=1732123387081 2024-11-20T17:23:11,199 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting f1955a9485f84a088cb4825d901a00d5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=164, earliestPutTs=1732123387177 2024-11-20T17:23:11,199 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 44e04bc8484d491ebe5bc6be921e44ca, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=164, earliestPutTs=1732123387177 2024-11-20T17:23:11,199 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 116def9dbf9c473791a64cc8971cf985, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1732123389334 2024-11-20T17:23:11,199 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting d5947b4bcb04454db5d4bfaba19cc806, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1732123389334 
2024-11-20T17:23:11,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 0e2c16046f40377cebf7837c5395d623 2024-11-20T17:23:11,211 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0e2c16046f40377cebf7837c5395d623 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T17:23:11,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=A 2024-11-20T17:23:11,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:11,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=B 2024-11-20T17:23:11,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:11,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=C 2024-11-20T17:23:11,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:11,214 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0e2c16046f40377cebf7837c5395d623#B#compaction#391 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:23:11,215 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0e2c16046f40377cebf7837c5395d623#A#compaction#392 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:23:11,215 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/3d17deabb50b4a89b5d060c76ffa32fa is 50, key is test_row_0/B:col10/1732123389334/Put/seqid=0 2024-11-20T17:23:11,215 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/69be695c737a476c956c2305cb549a96 is 50, key is test_row_0/A:col10/1732123389982/Put/seqid=0 2024-11-20T17:23:11,215 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/1a403d16a842483a811dc554356c8688 is 50, key is test_row_0/A:col10/1732123389334/Put/seqid=0 2024-11-20T17:23:11,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742286_1462 (size=12595) 2024-11-20T17:23:11,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742285_1461 (size=14541) 2024-11-20T17:23:11,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742287_1463 (size=12595) 2024-11-20T17:23:11,228 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/1a403d16a842483a811dc554356c8688 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/1a403d16a842483a811dc554356c8688 2024-11-20T17:23:11,232 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0e2c16046f40377cebf7837c5395d623/A of 0e2c16046f40377cebf7837c5395d623 into 1a403d16a842483a811dc554356c8688(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:23:11,232 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:23:11,232 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623., storeName=0e2c16046f40377cebf7837c5395d623/A, priority=13, startTime=1732123391197; duration=0sec 2024-11-20T17:23:11,232 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:23:11,232 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0e2c16046f40377cebf7837c5395d623:A 2024-11-20T17:23:11,232 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:23:11,233 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:23:11,233 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): 0e2c16046f40377cebf7837c5395d623/C is initiating minor compaction (all files) 2024-11-20T17:23:11,233 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0e2c16046f40377cebf7837c5395d623/C in TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:11,233 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/d7304a2fb97e47ea8da6efeaf3b8361e, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/85f3e5c460f4462093122a62dd7ec795, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/d26e93f616f245a3b64c6091026e864e] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp, totalSize=35.9 K 2024-11-20T17:23:11,233 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting d7304a2fb97e47ea8da6efeaf3b8361e, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=139, earliestPutTs=1732123387081 2024-11-20T17:23:11,233 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 85f3e5c460f4462093122a62dd7ec795, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=164, earliestPutTs=1732123387177 2024-11-20T17:23:11,234 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting d26e93f616f245a3b64c6091026e864e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1732123389334 2024-11-20T17:23:11,240 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 0e2c16046f40377cebf7837c5395d623#C#compaction#394 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:23:11,241 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/8f96876136b04a22976c493c9715d29a is 50, key is test_row_0/C:col10/1732123389334/Put/seqid=0 2024-11-20T17:23:11,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742288_1464 (size=12595) 2024-11-20T17:23:11,270 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:11,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123451265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:11,374 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:11,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123451371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:11,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T17:23:11,425 INFO [Thread-1898 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-11-20T17:23:11,426 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:23:11,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees 2024-11-20T17:23:11,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T17:23:11,428 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:23:11,428 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:23:11,428 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:23:11,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T17:23:11,579 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:11,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123451575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:11,580 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:11,580 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T17:23:11,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:11,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:11,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:11,581 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:11,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:11,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:11,624 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=204 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/69be695c737a476c956c2305cb549a96 2024-11-20T17:23:11,629 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/3d17deabb50b4a89b5d060c76ffa32fa as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/3d17deabb50b4a89b5d060c76ffa32fa 2024-11-20T17:23:11,631 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/6486e7dd110d486c9b412c6091ac91a6 is 50, key is test_row_0/B:col10/1732123389982/Put/seqid=0 2024-11-20T17:23:11,633 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0e2c16046f40377cebf7837c5395d623/B of 0e2c16046f40377cebf7837c5395d623 into 3d17deabb50b4a89b5d060c76ffa32fa(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:23:11,633 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:23:11,633 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623., storeName=0e2c16046f40377cebf7837c5395d623/B, priority=13, startTime=1732123391197; duration=0sec 2024-11-20T17:23:11,633 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:11,633 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0e2c16046f40377cebf7837c5395d623:B 2024-11-20T17:23:11,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742289_1465 (size=12151) 2024-11-20T17:23:11,636 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=204 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/6486e7dd110d486c9b412c6091ac91a6 2024-11-20T17:23:11,643 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/4884efb449e54c019849e1b37cd6ba12 is 50, key is test_row_0/C:col10/1732123389982/Put/seqid=0 2024-11-20T17:23:11,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742290_1466 (size=12151) 2024-11-20T17:23:11,649 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/8f96876136b04a22976c493c9715d29a as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/8f96876136b04a22976c493c9715d29a 2024-11-20T17:23:11,653 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0e2c16046f40377cebf7837c5395d623/C of 0e2c16046f40377cebf7837c5395d623 into 8f96876136b04a22976c493c9715d29a(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:23:11,653 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:23:11,653 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623., storeName=0e2c16046f40377cebf7837c5395d623/C, priority=13, startTime=1732123391197; duration=0sec 2024-11-20T17:23:11,653 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:11,653 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0e2c16046f40377cebf7837c5395d623:C 2024-11-20T17:23:11,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T17:23:11,733 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:11,733 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T17:23:11,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:11,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:11,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:11,733 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:23:11,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:11,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:11,884 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:11,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123451882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:11,885 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:11,886 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T17:23:11,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:11,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:11,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 
2024-11-20T17:23:11,886 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:11,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:11,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:12,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T17:23:12,038 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:12,038 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T17:23:12,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:12,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:12,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:12,039 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:12,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:12,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:12,050 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=204 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/4884efb449e54c019849e1b37cd6ba12 2024-11-20T17:23:12,054 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/69be695c737a476c956c2305cb549a96 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/69be695c737a476c956c2305cb549a96 2024-11-20T17:23:12,057 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/69be695c737a476c956c2305cb549a96, entries=200, sequenceid=204, filesize=14.2 K 2024-11-20T17:23:12,058 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/6486e7dd110d486c9b412c6091ac91a6 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/6486e7dd110d486c9b412c6091ac91a6 2024-11-20T17:23:12,060 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/6486e7dd110d486c9b412c6091ac91a6, entries=150, 
sequenceid=204, filesize=11.9 K 2024-11-20T17:23:12,061 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/4884efb449e54c019849e1b37cd6ba12 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/4884efb449e54c019849e1b37cd6ba12 2024-11-20T17:23:12,065 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/4884efb449e54c019849e1b37cd6ba12, entries=150, sequenceid=204, filesize=11.9 K 2024-11-20T17:23:12,065 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 0e2c16046f40377cebf7837c5395d623 in 854ms, sequenceid=204, compaction requested=false 2024-11-20T17:23:12,065 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:23:12,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 0e2c16046f40377cebf7837c5395d623 2024-11-20T17:23:12,114 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0e2c16046f40377cebf7837c5395d623 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T17:23:12,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=A 2024-11-20T17:23:12,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:12,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=B 2024-11-20T17:23:12,115 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:12,115 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=C 2024-11-20T17:23:12,115 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:12,118 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/a37877cf6d9d4ab3bfa7249e44d19b85 is 50, key is test_row_0/A:col10/1732123392113/Put/seqid=0 2024-11-20T17:23:12,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742291_1467 (size=14541) 2024-11-20T17:23:12,157 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:12,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45576 deadline: 1732123452151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:12,158 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:12,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123452152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:12,158 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:12,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123452152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:12,159 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:12,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123452155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:12,191 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:12,191 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T17:23:12,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:12,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:12,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:12,191 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:23:12,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:12,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:12,262 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:12,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45576 deadline: 1732123452259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:12,262 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:12,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123452259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:12,263 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:12,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123452259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:12,263 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:12,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123452260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:12,343 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:12,344 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T17:23:12,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:12,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:12,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:12,344 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:12,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:12,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:12,388 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:12,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123452385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:12,467 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:12,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45576 deadline: 1732123452463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:12,467 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:12,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123452463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:12,467 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:12,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123452463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:12,468 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:12,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123452465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:12,496 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:12,497 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T17:23:12,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:12,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:12,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:12,497 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:23:12,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:12,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:12,526 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=220 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/a37877cf6d9d4ab3bfa7249e44d19b85 2024-11-20T17:23:12,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T17:23:12,532 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/2050662ea6494106b22116fc73c36f4e is 50, key is test_row_0/B:col10/1732123392113/Put/seqid=0 2024-11-20T17:23:12,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742292_1468 (size=12151) 2024-11-20T17:23:12,649 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:12,649 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T17:23:12,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:12,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:12,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:12,650 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:12,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:12,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:12,770 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:12,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45576 deadline: 1732123452768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:12,771 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:12,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123452768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:12,771 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:12,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123452769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:12,773 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:12,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123452770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:12,802 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:12,803 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T17:23:12,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:12,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:12,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:12,803 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:12,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:12,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:12,936 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=220 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/2050662ea6494106b22116fc73c36f4e 2024-11-20T17:23:12,942 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/32fb14e8413d439a9af5a397cc9d259b is 50, key is test_row_0/C:col10/1732123392113/Put/seqid=0 2024-11-20T17:23:12,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742293_1469 (size=12151) 2024-11-20T17:23:12,955 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:12,955 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T17:23:12,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:12,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:12,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:12,955 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:12,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:12,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:13,107 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:13,108 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T17:23:13,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:13,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:13,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:13,108 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:13,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:13,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:13,260 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:13,260 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T17:23:13,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:13,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:13,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:13,261 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:13,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:13,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:13,275 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:13,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45576 deadline: 1732123453275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:13,279 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:13,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123453276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:13,280 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:13,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123453276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:13,280 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:13,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123453277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:13,346 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=220 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/32fb14e8413d439a9af5a397cc9d259b 2024-11-20T17:23:13,350 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/a37877cf6d9d4ab3bfa7249e44d19b85 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/a37877cf6d9d4ab3bfa7249e44d19b85 2024-11-20T17:23:13,353 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/a37877cf6d9d4ab3bfa7249e44d19b85, entries=200, sequenceid=220, filesize=14.2 K 2024-11-20T17:23:13,354 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/2050662ea6494106b22116fc73c36f4e as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/2050662ea6494106b22116fc73c36f4e 2024-11-20T17:23:13,357 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/2050662ea6494106b22116fc73c36f4e, entries=150, sequenceid=220, filesize=11.9 K 2024-11-20T17:23:13,357 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/32fb14e8413d439a9af5a397cc9d259b as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/32fb14e8413d439a9af5a397cc9d259b 2024-11-20T17:23:13,360 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/32fb14e8413d439a9af5a397cc9d259b, entries=150, sequenceid=220, filesize=11.9 K 2024-11-20T17:23:13,361 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 0e2c16046f40377cebf7837c5395d623 in 1247ms, sequenceid=220, compaction requested=true 2024-11-20T17:23:13,361 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:23:13,362 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:23:13,362 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0e2c16046f40377cebf7837c5395d623:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:23:13,362 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:13,362 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:23:13,362 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0e2c16046f40377cebf7837c5395d623:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:23:13,362 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:13,362 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0e2c16046f40377cebf7837c5395d623:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:23:13,362 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:23:13,362 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41677 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:23:13,362 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): 0e2c16046f40377cebf7837c5395d623/A is initiating minor compaction (all files) 2024-11-20T17:23:13,363 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:23:13,363 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0e2c16046f40377cebf7837c5395d623/A in TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 
2024-11-20T17:23:13,363 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 0e2c16046f40377cebf7837c5395d623/B is initiating minor compaction (all files) 2024-11-20T17:23:13,363 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/1a403d16a842483a811dc554356c8688, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/69be695c737a476c956c2305cb549a96, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/a37877cf6d9d4ab3bfa7249e44d19b85] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp, totalSize=40.7 K 2024-11-20T17:23:13,363 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0e2c16046f40377cebf7837c5395d623/B in TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:13,363 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/3d17deabb50b4a89b5d060c76ffa32fa, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/6486e7dd110d486c9b412c6091ac91a6, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/2050662ea6494106b22116fc73c36f4e] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp, totalSize=36.0 K 2024-11-20T17:23:13,363 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1a403d16a842483a811dc554356c8688, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1732123389334 2024-11-20T17:23:13,363 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 3d17deabb50b4a89b5d060c76ffa32fa, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1732123389334 2024-11-20T17:23:13,363 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 69be695c737a476c956c2305cb549a96, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1732123389966 2024-11-20T17:23:13,363 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 6486e7dd110d486c9b412c6091ac91a6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1732123389978 2024-11-20T17:23:13,364 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting a37877cf6d9d4ab3bfa7249e44d19b85, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1732123391252 2024-11-20T17:23:13,364 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 2050662ea6494106b22116fc73c36f4e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1732123391252 2024-11-20T17:23:13,370 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0e2c16046f40377cebf7837c5395d623#B#compaction#400 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:23:13,370 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/ec6249f718dc4a2cb23e530000af125d is 50, key is test_row_0/B:col10/1732123392113/Put/seqid=0 2024-11-20T17:23:13,370 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0e2c16046f40377cebf7837c5395d623#A#compaction#401 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:23:13,371 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/7459c0d84f394567843ff908293af75a is 50, key is test_row_0/A:col10/1732123392113/Put/seqid=0 2024-11-20T17:23:13,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742294_1470 (size=12697) 2024-11-20T17:23:13,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742295_1471 (size=12697) 2024-11-20T17:23:13,378 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/7459c0d84f394567843ff908293af75a as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/7459c0d84f394567843ff908293af75a 2024-11-20T17:23:13,383 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0e2c16046f40377cebf7837c5395d623/A of 0e2c16046f40377cebf7837c5395d623 into 7459c0d84f394567843ff908293af75a(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:23:13,383 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:23:13,383 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623., storeName=0e2c16046f40377cebf7837c5395d623/A, priority=13, startTime=1732123393361; duration=0sec 2024-11-20T17:23:13,383 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:23:13,383 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0e2c16046f40377cebf7837c5395d623:A 2024-11-20T17:23:13,383 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:23:13,384 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:23:13,384 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): 0e2c16046f40377cebf7837c5395d623/C is initiating minor compaction (all files) 2024-11-20T17:23:13,384 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0e2c16046f40377cebf7837c5395d623/C in TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:13,384 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/8f96876136b04a22976c493c9715d29a, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/4884efb449e54c019849e1b37cd6ba12, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/32fb14e8413d439a9af5a397cc9d259b] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp, totalSize=36.0 K 2024-11-20T17:23:13,385 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8f96876136b04a22976c493c9715d29a, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1732123389334 2024-11-20T17:23:13,385 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4884efb449e54c019849e1b37cd6ba12, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1732123389978 2024-11-20T17:23:13,385 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 32fb14e8413d439a9af5a397cc9d259b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1732123391252 2024-11-20T17:23:13,391 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 0e2c16046f40377cebf7837c5395d623#C#compaction#402 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:23:13,392 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/dcc8618985074e96964cf52cf5ef343b is 50, key is test_row_0/C:col10/1732123392113/Put/seqid=0 2024-11-20T17:23:13,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742296_1472 (size=12697) 2024-11-20T17:23:13,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 0e2c16046f40377cebf7837c5395d623 2024-11-20T17:23:13,398 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0e2c16046f40377cebf7837c5395d623 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T17:23:13,398 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=A 2024-11-20T17:23:13,398 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:13,399 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=B 2024-11-20T17:23:13,399 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:13,399 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=C 2024-11-20T17:23:13,399 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:13,403 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/08dc313a0dad4ab893aeb07507bcc354 is 50, key is test_row_0/A:col10/1732123392154/Put/seqid=0 2024-11-20T17:23:13,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742297_1473 (size=14541) 2024-11-20T17:23:13,412 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:13,413 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T17:23:13,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:13,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 
as already flushing 2024-11-20T17:23:13,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:13,413 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:13,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:13,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:13,451 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:13,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123453444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:13,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T17:23:13,555 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:13,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123453552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:13,565 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:13,565 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T17:23:13,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:13,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:13,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:13,565 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:23:13,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:13,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:13,717 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:13,718 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T17:23:13,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:13,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:13,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:13,718 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:13,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:13,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:13,758 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:13,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123453757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:13,779 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/ec6249f718dc4a2cb23e530000af125d as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/ec6249f718dc4a2cb23e530000af125d 2024-11-20T17:23:13,782 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0e2c16046f40377cebf7837c5395d623/B of 0e2c16046f40377cebf7837c5395d623 into ec6249f718dc4a2cb23e530000af125d(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:23:13,782 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:23:13,782 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623., storeName=0e2c16046f40377cebf7837c5395d623/B, priority=13, startTime=1732123393362; duration=0sec 2024-11-20T17:23:13,782 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:13,782 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0e2c16046f40377cebf7837c5395d623:B 2024-11-20T17:23:13,799 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/dcc8618985074e96964cf52cf5ef343b as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/dcc8618985074e96964cf52cf5ef343b 2024-11-20T17:23:13,802 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0e2c16046f40377cebf7837c5395d623/C of 0e2c16046f40377cebf7837c5395d623 into dcc8618985074e96964cf52cf5ef343b(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:23:13,802 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:23:13,802 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623., storeName=0e2c16046f40377cebf7837c5395d623/C, priority=13, startTime=1732123393362; duration=0sec 2024-11-20T17:23:13,803 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:13,803 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0e2c16046f40377cebf7837c5395d623:C 2024-11-20T17:23:13,807 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/08dc313a0dad4ab893aeb07507bcc354 2024-11-20T17:23:13,813 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/ab4156912f7f4d9284fa473892725e29 is 50, key is test_row_0/B:col10/1732123392154/Put/seqid=0 2024-11-20T17:23:13,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742298_1474 (size=12151) 
2024-11-20T17:23:13,870 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:13,871 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T17:23:13,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:13,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:13,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:13,871 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:13,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:23:13,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:14,023 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:14,023 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T17:23:14,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:14,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 
as already flushing 2024-11-20T17:23:14,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:14,024 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:14,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:14,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:14,063 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:14,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123454061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:14,176 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:14,176 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T17:23:14,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:14,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:14,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:14,176 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:23:14,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:14,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:14,225 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/ab4156912f7f4d9284fa473892725e29 2024-11-20T17:23:14,232 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/dd03f6d87a28492a891d20e18cf3bee1 is 50, key is test_row_0/C:col10/1732123392154/Put/seqid=0 2024-11-20T17:23:14,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742299_1475 (size=12151) 2024-11-20T17:23:14,284 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:14,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123454283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:14,285 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:14,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123454283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:14,287 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:14,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45576 deadline: 1732123454285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:14,289 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:14,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123454286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:14,328 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:14,329 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T17:23:14,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:14,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:14,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:14,329 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:14,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:14,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:14,481 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:14,481 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T17:23:14,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:14,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:14,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:14,482 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:14,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:14,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:14,567 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:14,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123454565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:14,633 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:14,634 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T17:23:14,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:14,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:14,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:14,634 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:23:14,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:14,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:14,636 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/dd03f6d87a28492a891d20e18cf3bee1 2024-11-20T17:23:14,640 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/08dc313a0dad4ab893aeb07507bcc354 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/08dc313a0dad4ab893aeb07507bcc354 2024-11-20T17:23:14,643 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/08dc313a0dad4ab893aeb07507bcc354, entries=200, sequenceid=245, filesize=14.2 K 2024-11-20T17:23:14,644 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/ab4156912f7f4d9284fa473892725e29 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/ab4156912f7f4d9284fa473892725e29 2024-11-20T17:23:14,647 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/ab4156912f7f4d9284fa473892725e29, entries=150, sequenceid=245, filesize=11.9 K 2024-11-20T17:23:14,647 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/dd03f6d87a28492a891d20e18cf3bee1 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/dd03f6d87a28492a891d20e18cf3bee1 2024-11-20T17:23:14,651 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/dd03f6d87a28492a891d20e18cf3bee1, entries=150, sequenceid=245, filesize=11.9 K 2024-11-20T17:23:14,651 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 0e2c16046f40377cebf7837c5395d623 in 1253ms, sequenceid=245, compaction requested=false 2024-11-20T17:23:14,651 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:23:14,786 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 
d514dc944523,40121,1732123262111 2024-11-20T17:23:14,786 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T17:23:14,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:14,787 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2837): Flushing 0e2c16046f40377cebf7837c5395d623 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T17:23:14,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=A 2024-11-20T17:23:14,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:14,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=B 2024-11-20T17:23:14,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:14,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=C 2024-11-20T17:23:14,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:14,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/b80705365fd245ce95da52717feeda3d is 50, key is test_row_0/A:col10/1732123393407/Put/seqid=0 2024-11-20T17:23:14,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742300_1476 (size=12151) 2024-11-20T17:23:15,196 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=259 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/b80705365fd245ce95da52717feeda3d 2024-11-20T17:23:15,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/700c1a2f2d5f4626a0e1874431fb2c20 is 50, key is test_row_0/B:col10/1732123393407/Put/seqid=0 2024-11-20T17:23:15,208 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742301_1477 (size=12151) 2024-11-20T17:23:15,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T17:23:15,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 0e2c16046f40377cebf7837c5395d623 2024-11-20T17:23:15,571 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:15,608 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=259 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/700c1a2f2d5f4626a0e1874431fb2c20 2024-11-20T17:23:15,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/403f0efd465a4dd584bebb7fe177c361 is 50, key is test_row_0/C:col10/1732123393407/Put/seqid=0 2024-11-20T17:23:15,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742302_1478 (size=12151) 2024-11-20T17:23:15,678 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:15,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123455673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:15,780 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:15,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123455779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:15,982 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:15,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123455981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:16,020 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=259 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/403f0efd465a4dd584bebb7fe177c361 2024-11-20T17:23:16,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/b80705365fd245ce95da52717feeda3d as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/b80705365fd245ce95da52717feeda3d 2024-11-20T17:23:16,027 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/b80705365fd245ce95da52717feeda3d, entries=150, sequenceid=259, filesize=11.9 K 2024-11-20T17:23:16,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/700c1a2f2d5f4626a0e1874431fb2c20 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/700c1a2f2d5f4626a0e1874431fb2c20 2024-11-20T17:23:16,031 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/700c1a2f2d5f4626a0e1874431fb2c20, entries=150, sequenceid=259, filesize=11.9 K 2024-11-20T17:23:16,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/403f0efd465a4dd584bebb7fe177c361 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/403f0efd465a4dd584bebb7fe177c361 2024-11-20T17:23:16,035 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/403f0efd465a4dd584bebb7fe177c361, entries=150, sequenceid=259, filesize=11.9 K 2024-11-20T17:23:16,035 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 0e2c16046f40377cebf7837c5395d623 in 1248ms, sequenceid=259, compaction requested=true 2024-11-20T17:23:16,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2538): Flush status journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:23:16,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 
2024-11-20T17:23:16,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-11-20T17:23:16,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=139 2024-11-20T17:23:16,038 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-11-20T17:23:16,038 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 4.6090 sec 2024-11-20T17:23:16,039 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees in 4.6120 sec 2024-11-20T17:23:16,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 0e2c16046f40377cebf7837c5395d623 2024-11-20T17:23:16,289 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0e2c16046f40377cebf7837c5395d623 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T17:23:16,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=A 2024-11-20T17:23:16,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:16,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=B 2024-11-20T17:23:16,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:16,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=C 2024-11-20T17:23:16,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:16,293 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/d87934d2b6514affb39a2f12c180ac26 is 50, key is test_row_0/A:col10/1732123395672/Put/seqid=0 2024-11-20T17:23:16,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742303_1479 (size=14741) 2024-11-20T17:23:16,307 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:16,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45576 deadline: 1732123456304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:16,308 DEBUG [Thread-1896 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4156 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623., hostname=d514dc944523,40121,1732123262111, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:23:16,310 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:16,310 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:16,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123456305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:16,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123456305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:16,310 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:16,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123456306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:16,312 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:16,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123456307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:16,413 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:16,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123456411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:16,413 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:16,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123456411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:16,413 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:16,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123456411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:16,416 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:16,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123456413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:16,618 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:16,618 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:16,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123456614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:16,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123456614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:16,618 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:16,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123456615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:16,620 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:16,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123456617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:16,698 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=284 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/d87934d2b6514affb39a2f12c180ac26 2024-11-20T17:23:16,705 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/99594a1622a34e45aa944d685f17d438 is 50, key is test_row_0/B:col10/1732123395672/Put/seqid=0 2024-11-20T17:23:16,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742304_1480 (size=12301) 2024-11-20T17:23:16,923 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:16,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123456919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:16,923 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:16,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123456920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:16,923 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:16,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123456920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:16,924 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:16,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123456922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:17,109 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=284 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/99594a1622a34e45aa944d685f17d438 2024-11-20T17:23:17,116 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/df3d5af80b534965b98a0e2ebfebb307 is 50, key is test_row_0/C:col10/1732123395672/Put/seqid=0 2024-11-20T17:23:17,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742305_1481 (size=12301) 2024-11-20T17:23:17,428 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:17,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123457425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:17,431 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:17,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123457427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:17,431 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:17,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123457428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:17,434 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:17,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123457429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:17,521 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=284 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/df3d5af80b534965b98a0e2ebfebb307 2024-11-20T17:23:17,525 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/d87934d2b6514affb39a2f12c180ac26 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/d87934d2b6514affb39a2f12c180ac26 2024-11-20T17:23:17,528 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/d87934d2b6514affb39a2f12c180ac26, entries=200, sequenceid=284, filesize=14.4 K 2024-11-20T17:23:17,529 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/99594a1622a34e45aa944d685f17d438 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/99594a1622a34e45aa944d685f17d438 2024-11-20T17:23:17,533 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/99594a1622a34e45aa944d685f17d438, entries=150, sequenceid=284, filesize=12.0 K 2024-11-20T17:23:17,535 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/df3d5af80b534965b98a0e2ebfebb307 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/df3d5af80b534965b98a0e2ebfebb307 2024-11-20T17:23:17,538 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/df3d5af80b534965b98a0e2ebfebb307, entries=150, sequenceid=284, filesize=12.0 K 2024-11-20T17:23:17,539 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 0e2c16046f40377cebf7837c5395d623 in 1250ms, sequenceid=284, compaction requested=true 2024-11-20T17:23:17,539 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:23:17,539 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0e2c16046f40377cebf7837c5395d623:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:23:17,539 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:17,539 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:23:17,539 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:23:17,539 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0e2c16046f40377cebf7837c5395d623:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:23:17,539 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:17,539 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0e2c16046f40377cebf7837c5395d623:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:23:17,539 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:23:17,540 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49300 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:23:17,540 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 54130 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:23:17,540 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 0e2c16046f40377cebf7837c5395d623/B is initiating minor compaction (all files) 2024-11-20T17:23:17,540 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): 0e2c16046f40377cebf7837c5395d623/A is initiating minor compaction (all files) 2024-11-20T17:23:17,540 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0e2c16046f40377cebf7837c5395d623/B in TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 
2024-11-20T17:23:17,540 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0e2c16046f40377cebf7837c5395d623/A in TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:17,540 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/ec6249f718dc4a2cb23e530000af125d, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/ab4156912f7f4d9284fa473892725e29, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/700c1a2f2d5f4626a0e1874431fb2c20, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/99594a1622a34e45aa944d685f17d438] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp, totalSize=48.1 K 2024-11-20T17:23:17,540 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/7459c0d84f394567843ff908293af75a, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/08dc313a0dad4ab893aeb07507bcc354, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/b80705365fd245ce95da52717feeda3d, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/d87934d2b6514affb39a2f12c180ac26] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp, totalSize=52.9 K 2024-11-20T17:23:17,540 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting ec6249f718dc4a2cb23e530000af125d, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1732123391252 2024-11-20T17:23:17,540 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7459c0d84f394567843ff908293af75a, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1732123391252 2024-11-20T17:23:17,541 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting ab4156912f7f4d9284fa473892725e29, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1732123392140 2024-11-20T17:23:17,541 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 08dc313a0dad4ab893aeb07507bcc354, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1732123392140 2024-11-20T17:23:17,541 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 700c1a2f2d5f4626a0e1874431fb2c20, keycount=150, 
bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1732123393407 2024-11-20T17:23:17,541 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting b80705365fd245ce95da52717feeda3d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1732123393407 2024-11-20T17:23:17,541 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting d87934d2b6514affb39a2f12c180ac26, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1732123395643 2024-11-20T17:23:17,541 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 99594a1622a34e45aa944d685f17d438, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1732123395643 2024-11-20T17:23:17,549 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0e2c16046f40377cebf7837c5395d623#B#compaction#412 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:23:17,549 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/40bfa374db6b4b449586ce88de8e1563 is 50, key is test_row_0/B:col10/1732123395672/Put/seqid=0 2024-11-20T17:23:17,550 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0e2c16046f40377cebf7837c5395d623#A#compaction#413 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:23:17,551 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/1c50de7392a24e638844abf49aa69b52 is 50, key is test_row_0/A:col10/1732123395672/Put/seqid=0 2024-11-20T17:23:17,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742306_1482 (size=12983) 2024-11-20T17:23:17,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742307_1483 (size=12983) 2024-11-20T17:23:17,562 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/1c50de7392a24e638844abf49aa69b52 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/1c50de7392a24e638844abf49aa69b52 2024-11-20T17:23:17,576 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0e2c16046f40377cebf7837c5395d623/A of 0e2c16046f40377cebf7837c5395d623 into 1c50de7392a24e638844abf49aa69b52(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:23:17,576 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:23:17,576 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623., storeName=0e2c16046f40377cebf7837c5395d623/A, priority=12, startTime=1732123397539; duration=0sec 2024-11-20T17:23:17,577 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:23:17,577 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0e2c16046f40377cebf7837c5395d623:A 2024-11-20T17:23:17,577 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:23:17,578 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49300 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:23:17,579 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): 0e2c16046f40377cebf7837c5395d623/C is initiating minor compaction (all files) 2024-11-20T17:23:17,579 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0e2c16046f40377cebf7837c5395d623/C in TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:17,579 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/dcc8618985074e96964cf52cf5ef343b, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/dd03f6d87a28492a891d20e18cf3bee1, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/403f0efd465a4dd584bebb7fe177c361, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/df3d5af80b534965b98a0e2ebfebb307] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp, totalSize=48.1 K 2024-11-20T17:23:17,579 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting dcc8618985074e96964cf52cf5ef343b, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1732123391252 2024-11-20T17:23:17,579 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting dd03f6d87a28492a891d20e18cf3bee1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1732123392140 2024-11-20T17:23:17,580 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 403f0efd465a4dd584bebb7fe177c361, keycount=150, bloomtype=ROW, size=11.9 K, 
encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1732123393407 2024-11-20T17:23:17,582 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting df3d5af80b534965b98a0e2ebfebb307, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1732123395643 2024-11-20T17:23:17,592 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0e2c16046f40377cebf7837c5395d623#C#compaction#414 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:23:17,592 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/bcd6f1a38405442b99489915a79139cf is 50, key is test_row_0/C:col10/1732123395672/Put/seqid=0 2024-11-20T17:23:17,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742308_1484 (size=12983) 2024-11-20T17:23:17,960 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/40bfa374db6b4b449586ce88de8e1563 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/40bfa374db6b4b449586ce88de8e1563 2024-11-20T17:23:17,964 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0e2c16046f40377cebf7837c5395d623/B of 0e2c16046f40377cebf7837c5395d623 into 40bfa374db6b4b449586ce88de8e1563(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:23:17,964 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:23:17,964 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623., storeName=0e2c16046f40377cebf7837c5395d623/B, priority=12, startTime=1732123397539; duration=0sec 2024-11-20T17:23:17,964 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:17,964 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0e2c16046f40377cebf7837c5395d623:B 2024-11-20T17:23:18,002 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/bcd6f1a38405442b99489915a79139cf as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/bcd6f1a38405442b99489915a79139cf 2024-11-20T17:23:18,006 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0e2c16046f40377cebf7837c5395d623/C of 0e2c16046f40377cebf7837c5395d623 into bcd6f1a38405442b99489915a79139cf(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:23:18,006 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:23:18,006 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623., storeName=0e2c16046f40377cebf7837c5395d623/C, priority=12, startTime=1732123397539; duration=0sec 2024-11-20T17:23:18,006 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:18,006 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0e2c16046f40377cebf7837c5395d623:C 2024-11-20T17:23:18,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 0e2c16046f40377cebf7837c5395d623 2024-11-20T17:23:18,437 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0e2c16046f40377cebf7837c5395d623 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T17:23:18,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=A 2024-11-20T17:23:18,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:18,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=B 2024-11-20T17:23:18,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping 
pipeline suffix; before=1, new segment=null 2024-11-20T17:23:18,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=C 2024-11-20T17:23:18,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:18,442 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/9b207b4eb3484e55b40a87b50843bc31 is 50, key is test_row_0/A:col10/1732123396306/Put/seqid=0 2024-11-20T17:23:18,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742309_1485 (size=14741) 2024-11-20T17:23:18,452 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/9b207b4eb3484e55b40a87b50843bc31 2024-11-20T17:23:18,460 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/45068e46c4234261a472a4506b65592e is 50, key is test_row_0/B:col10/1732123396306/Put/seqid=0 2024-11-20T17:23:18,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742310_1486 (size=12301) 2024-11-20T17:23:18,498 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:18,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123458490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:18,502 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:18,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123458497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:18,502 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:18,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123458498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:18,502 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:18,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123458498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:18,601 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:18,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123458599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:18,606 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:18,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123458603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:18,606 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:18,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123458603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:18,606 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:18,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123458603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:18,806 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:18,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123458803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:18,811 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:18,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123458807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:18,811 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:18,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123458808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:18,811 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:18,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123458808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:18,868 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/45068e46c4234261a472a4506b65592e 2024-11-20T17:23:18,875 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/936458abf50d49f7bc242350db6358fd is 50, key is test_row_0/C:col10/1732123396306/Put/seqid=0 2024-11-20T17:23:18,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742311_1487 (size=12301) 2024-11-20T17:23:19,110 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:19,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123459109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:19,114 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:19,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123459112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:19,115 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:19,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123459112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:19,115 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:19,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123459114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:19,279 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/936458abf50d49f7bc242350db6358fd 2024-11-20T17:23:19,282 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/9b207b4eb3484e55b40a87b50843bc31 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/9b207b4eb3484e55b40a87b50843bc31 2024-11-20T17:23:19,286 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/9b207b4eb3484e55b40a87b50843bc31, entries=200, sequenceid=299, filesize=14.4 K 2024-11-20T17:23:19,288 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/45068e46c4234261a472a4506b65592e as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/45068e46c4234261a472a4506b65592e 2024-11-20T17:23:19,291 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/45068e46c4234261a472a4506b65592e, entries=150, sequenceid=299, filesize=12.0 K 2024-11-20T17:23:19,292 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/936458abf50d49f7bc242350db6358fd as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/936458abf50d49f7bc242350db6358fd 2024-11-20T17:23:19,295 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/936458abf50d49f7bc242350db6358fd, entries=150, sequenceid=299, filesize=12.0 K 2024-11-20T17:23:19,296 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 0e2c16046f40377cebf7837c5395d623 in 859ms, sequenceid=299, compaction requested=false 2024-11-20T17:23:19,296 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:23:19,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T17:23:19,533 INFO [Thread-1898 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed 2024-11-20T17:23:19,534 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:23:19,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees 2024-11-20T17:23:19,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T17:23:19,536 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:23:19,536 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:23:19,536 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:23:19,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 0e2c16046f40377cebf7837c5395d623 2024-11-20T17:23:19,619 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0e2c16046f40377cebf7837c5395d623 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T17:23:19,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=A 2024-11-20T17:23:19,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:19,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=B 2024-11-20T17:23:19,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:19,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=C 2024-11-20T17:23:19,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; 
before=1, new segment=null 2024-11-20T17:23:19,624 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/f5fb5ffe41d74b909bed2125a4d33b4a is 50, key is test_row_0/A:col10/1732123399617/Put/seqid=0 2024-11-20T17:23:19,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742312_1488 (size=14741) 2024-11-20T17:23:19,636 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:19,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T17:23:19,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123459630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:19,636 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:19,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123459630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:19,637 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:19,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123459631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:19,637 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:19,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123459632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:19,688 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:19,689 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T17:23:19,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:19,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:19,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:19,689 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:19,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:19,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:19,746 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:19,746 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:19,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123459741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:19,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123459741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:19,747 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:19,747 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:19,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123459742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:19,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123459741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:19,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T17:23:19,841 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:19,841 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T17:23:19,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:19,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:19,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:19,842 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:19,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:19,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:19,893 DEBUG [Thread-1899 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3d672ed2 to 127.0.0.1:55266 2024-11-20T17:23:19,893 DEBUG [Thread-1899 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:23:19,896 DEBUG [Thread-1907 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2405c04e to 127.0.0.1:55266 2024-11-20T17:23:19,896 DEBUG [Thread-1907 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:23:19,898 DEBUG [Thread-1903 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x496fe03f to 127.0.0.1:55266 2024-11-20T17:23:19,898 DEBUG [Thread-1903 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:23:19,899 DEBUG [Thread-1901 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7cf40102 to 127.0.0.1:55266 2024-11-20T17:23:19,899 DEBUG [Thread-1901 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:23:19,900 DEBUG [Thread-1905 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3652e74d to 127.0.0.1:55266 2024-11-20T17:23:19,901 DEBUG [Thread-1905 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:23:19,948 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:19,948 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:19,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123459948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:19,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123459948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:19,948 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:19,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123459948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:19,949 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:19,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123459949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:19,994 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:19,994 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T17:23:19,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:19,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:19,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:19,994 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:23:19,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:19,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:20,028 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/f5fb5ffe41d74b909bed2125a4d33b4a 2024-11-20T17:23:20,034 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/c5d2ccd8cb0f4b36a8f2db36a35ce686 is 50, key is test_row_0/B:col10/1732123399617/Put/seqid=0 2024-11-20T17:23:20,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742313_1489 (size=12301) 2024-11-20T17:23:20,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T17:23:20,146 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:20,146 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T17:23:20,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:20,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:20,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:20,147 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:20,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:20,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:20,250 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:20,250 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:20,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123460250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:20,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123460250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:20,250 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:20,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123460250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:20,251 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:20,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123460251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:20,299 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:20,299 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T17:23:20,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:20,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:20,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:20,299 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:20,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:20,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:20,348 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:20,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45576 deadline: 1732123460348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:20,349 DEBUG [Thread-1896 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8197 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623., hostname=d514dc944523,40121,1732123262111, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:23:20,438 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/c5d2ccd8cb0f4b36a8f2db36a35ce686 2024-11-20T17:23:20,443 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/6932eb5f5dee412fbc2916832651fe4e is 
50, key is test_row_0/C:col10/1732123399617/Put/seqid=0 2024-11-20T17:23:20,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742314_1490 (size=12301) 2024-11-20T17:23:20,451 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:20,451 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T17:23:20,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:20,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:20,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:20,452 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:20,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:20,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:20,603 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:20,604 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T17:23:20,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 
2024-11-20T17:23:20,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:20,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:20,604 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:20,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:23:20,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:20,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T17:23:20,751 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:20,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45616 deadline: 1732123460751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:20,752 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:20,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45608 deadline: 1732123460752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:20,752 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:20,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45582 deadline: 1732123460752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:20,753 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:20,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45570 deadline: 1732123460753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:20,756 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:20,756 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T17:23:20,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:20,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. as already flushing 2024-11-20T17:23:20,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:20,756 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
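The RegionTooBusyException warnings above are HRegion.checkResources rejecting writes while the region's memstore sits above its blocking limit (512.0 K here; the blocking limit is the region flush size multiplied by hbase.hregion.memstore.block.multiplier, so this run is presumably configured with a deliberately small flush size). The HBase client normally retries this exception on its own; the sketch below only illustrates an application-level backoff, assuming the exception surfaces to the caller once those retries are exhausted. Everything except the table name is illustrative:

import java.io.IOException;

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public class BackoffPut {
  // Retry a put that keeps failing because the target region's memstore is blocked.
  static void putWithBackoff(Connection conn, Put put) throws IOException, InterruptedException {
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      long sleepMs = 100;
      for (int attempt = 0; attempt < 10; attempt++) {
        try {
          table.put(put);
          return;
        } catch (RegionTooBusyException e) {
          // Memstore above its blocking limit; give the in-flight flush time to catch up.
          Thread.sleep(sleepMs);
          sleepMs = Math.min(sleepMs * 2, 5_000);
        }
      }
      throw new IOException("region stayed too busy after 10 attempts");
    }
  }
}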
2024-11-20T17:23:20,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:20,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:20,847 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/6932eb5f5dee412fbc2916832651fe4e 2024-11-20T17:23:20,850 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/f5fb5ffe41d74b909bed2125a4d33b4a as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/f5fb5ffe41d74b909bed2125a4d33b4a 2024-11-20T17:23:20,853 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/f5fb5ffe41d74b909bed2125a4d33b4a, entries=200, sequenceid=325, filesize=14.4 K 2024-11-20T17:23:20,853 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/c5d2ccd8cb0f4b36a8f2db36a35ce686 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/c5d2ccd8cb0f4b36a8f2db36a35ce686 2024-11-20T17:23:20,856 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/c5d2ccd8cb0f4b36a8f2db36a35ce686, entries=150, sequenceid=325, filesize=12.0 K 2024-11-20T17:23:20,856 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/6932eb5f5dee412fbc2916832651fe4e as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/6932eb5f5dee412fbc2916832651fe4e 2024-11-20T17:23:20,859 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/6932eb5f5dee412fbc2916832651fe4e, entries=150, sequenceid=325, filesize=12.0 K 2024-11-20T17:23:20,860 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 0e2c16046f40377cebf7837c5395d623 in 1240ms, sequenceid=325, compaction requested=true 2024-11-20T17:23:20,860 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:23:20,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
0e2c16046f40377cebf7837c5395d623:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:23:20,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:20,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0e2c16046f40377cebf7837c5395d623:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:23:20,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:20,860 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:23:20,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0e2c16046f40377cebf7837c5395d623:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:23:20,860 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:23:20,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:23:20,861 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:23:20,861 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42465 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:23:20,861 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 0e2c16046f40377cebf7837c5395d623/B is initiating minor compaction (all files) 2024-11-20T17:23:20,861 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): 0e2c16046f40377cebf7837c5395d623/A is initiating minor compaction (all files) 2024-11-20T17:23:20,861 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0e2c16046f40377cebf7837c5395d623/B in TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:20,861 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0e2c16046f40377cebf7837c5395d623/A in TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 
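In the compaction selection above, each of the two compaction threads picks all 3 eligible store files ("after considering 1 permutations with 1 in ratio"). The idea behind the ratio test is that a file only joins a minor compaction if it is not disproportionately larger than the rest of the candidate set. The snippet below is a deliberately simplified illustration of that check, not HBase's ExploringCompactionPolicy code; the 1.2 value is the usual default of hbase.hstore.compaction.ratio:

import java.util.List;

public class RatioCheck {
  // A candidate set is "in ratio" if no file is much larger than the sum of the others.
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > ratio * (total - size)) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // File sizes (bytes) roughly matching the A-store selection above: 12.7 K, 14.4 K, 14.4 K.
    System.out.println(filesInRatio(List.of(13000L, 14700L, 14700L), 1.2));
  }
}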
2024-11-20T17:23:20,861 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/1c50de7392a24e638844abf49aa69b52, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/9b207b4eb3484e55b40a87b50843bc31, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/f5fb5ffe41d74b909bed2125a4d33b4a] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp, totalSize=41.5 K 2024-11-20T17:23:20,861 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/40bfa374db6b4b449586ce88de8e1563, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/45068e46c4234261a472a4506b65592e, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/c5d2ccd8cb0f4b36a8f2db36a35ce686] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp, totalSize=36.7 K 2024-11-20T17:23:20,861 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1c50de7392a24e638844abf49aa69b52, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1732123395643 2024-11-20T17:23:20,861 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 40bfa374db6b4b449586ce88de8e1563, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1732123395643 2024-11-20T17:23:20,861 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 45068e46c4234261a472a4506b65592e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1732123396305 2024-11-20T17:23:20,861 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9b207b4eb3484e55b40a87b50843bc31, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1732123396298 2024-11-20T17:23:20,862 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting c5d2ccd8cb0f4b36a8f2db36a35ce686, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732123398489 2024-11-20T17:23:20,862 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting f5fb5ffe41d74b909bed2125a4d33b4a, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732123398489 2024-11-20T17:23:20,867 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0e2c16046f40377cebf7837c5395d623#B#compaction#421 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:23:20,867 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0e2c16046f40377cebf7837c5395d623#A#compaction#422 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:23:20,867 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/3146314d208646f3a24e51e18ef3dea1 is 50, key is test_row_0/B:col10/1732123399617/Put/seqid=0 2024-11-20T17:23:20,867 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/10d6014e51d0479b823e0cae2d6f7fac is 50, key is test_row_0/A:col10/1732123399617/Put/seqid=0 2024-11-20T17:23:20,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742316_1492 (size=13085) 2024-11-20T17:23:20,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742315_1491 (size=13085) 2024-11-20T17:23:20,908 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:20,908 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T17:23:20,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 
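The PressureAwareThroughputController lines report that both compactions finished without being throttled (slept 0 times) against the 50.00 MB/second overall limit. The general technique is to pace the bytes a compaction writes and sleep whenever the observed rate exceeds the limit; the class below sketches that idea only and is not the HBase controller:

public class SimpleThroughputLimiter {
  private final double maxBytesPerSecond;
  private long windowStartNanos = System.nanoTime();
  private long bytesInWindow = 0;

  public SimpleThroughputLimiter(double maxBytesPerSecond) {
    this.maxBytesPerSecond = maxBytesPerSecond;
  }

  // Call after writing `bytes`; sleeps if the current rate exceeds the limit.
  public synchronized void control(long bytes) throws InterruptedException {
    bytesInWindow += bytes;
    double elapsedSeconds = (System.nanoTime() - windowStartNanos) / 1e9;
    double minimumSeconds = bytesInWindow / maxBytesPerSecond;
    if (minimumSeconds > elapsedSeconds) {
      Thread.sleep((long) ((minimumSeconds - elapsedSeconds) * 1000));
    }
    if (elapsedSeconds >= 1.0) {   // roll the accounting window every second
      windowStartNanos = System.nanoTime();
      bytesInWindow = 0;
    }
  }
}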
2024-11-20T17:23:20,909 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2837): Flushing 0e2c16046f40377cebf7837c5395d623 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T17:23:20,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=A 2024-11-20T17:23:20,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:20,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=B 2024-11-20T17:23:20,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:20,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=C 2024-11-20T17:23:20,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:20,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/f89dbe6bd5a8480e8c104efe34d93ea9 is 50, key is test_row_0/A:col10/1732123399629/Put/seqid=0 2024-11-20T17:23:20,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742317_1493 (size=12301) 2024-11-20T17:23:21,274 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/3146314d208646f3a24e51e18ef3dea1 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/3146314d208646f3a24e51e18ef3dea1 2024-11-20T17:23:21,274 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/10d6014e51d0479b823e0cae2d6f7fac as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/10d6014e51d0479b823e0cae2d6f7fac 2024-11-20T17:23:21,278 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0e2c16046f40377cebf7837c5395d623/B of 0e2c16046f40377cebf7837c5395d623 into 3146314d208646f3a24e51e18ef3dea1(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
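Both flushes and compactions above commit their output the same way, visible in the "Committing ... .tmp/... as ..." lines: the new HFile is written completely under the region's .tmp directory and only then moved into the store directory, so scanners never observe a half-written file. A bare-bones sketch of that pattern with the Hadoop FileSystem API (not the HRegionFileSystem code; paths are whatever the caller supplies):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenCommit {
  // Write-to-tmp-then-rename: the file only appears under the store directory once it is complete.
  static void commit(Configuration conf, Path tmpFile, Path storeDir) throws IOException {
    FileSystem fs = tmpFile.getFileSystem(conf);
    Path dest = new Path(storeDir, tmpFile.getName());
    if (!fs.rename(tmpFile, dest)) {
      throw new IOException("failed to commit " + tmpFile + " as " + dest);
    }
  }
}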
2024-11-20T17:23:21,278 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0e2c16046f40377cebf7837c5395d623/A of 0e2c16046f40377cebf7837c5395d623 into 10d6014e51d0479b823e0cae2d6f7fac(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:23:21,278 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:23:21,278 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:23:21,278 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623., storeName=0e2c16046f40377cebf7837c5395d623/A, priority=13, startTime=1732123400860; duration=0sec 2024-11-20T17:23:21,278 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623., storeName=0e2c16046f40377cebf7837c5395d623/B, priority=13, startTime=1732123400860; duration=0sec 2024-11-20T17:23:21,278 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:23:21,278 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0e2c16046f40377cebf7837c5395d623:B 2024-11-20T17:23:21,278 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:21,278 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:23:21,278 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0e2c16046f40377cebf7837c5395d623:A 2024-11-20T17:23:21,279 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:23:21,279 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 0e2c16046f40377cebf7837c5395d623/C is initiating minor compaction (all files) 2024-11-20T17:23:21,279 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0e2c16046f40377cebf7837c5395d623/C in TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 
2024-11-20T17:23:21,279 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/bcd6f1a38405442b99489915a79139cf, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/936458abf50d49f7bc242350db6358fd, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/6932eb5f5dee412fbc2916832651fe4e] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp, totalSize=36.7 K 2024-11-20T17:23:21,279 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting bcd6f1a38405442b99489915a79139cf, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1732123395643 2024-11-20T17:23:21,280 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 936458abf50d49f7bc242350db6358fd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1732123396305 2024-11-20T17:23:21,280 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 6932eb5f5dee412fbc2916832651fe4e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732123398489 2024-11-20T17:23:21,285 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0e2c16046f40377cebf7837c5395d623#C#compaction#424 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:23:21,285 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/4a0240ca45f143c7a5b671b5669624e5 is 50, key is test_row_0/C:col10/1732123399617/Put/seqid=0 2024-11-20T17:23:21,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742318_1494 (size=13085) 2024-11-20T17:23:21,316 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/f89dbe6bd5a8480e8c104efe34d93ea9 2024-11-20T17:23:21,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/358278ef8b09461ab0139aaba537de34 is 50, key is test_row_0/B:col10/1732123399629/Put/seqid=0 2024-11-20T17:23:21,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742319_1495 (size=12301) 2024-11-20T17:23:21,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T17:23:21,693 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/4a0240ca45f143c7a5b671b5669624e5 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/4a0240ca45f143c7a5b671b5669624e5 2024-11-20T17:23:21,696 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0e2c16046f40377cebf7837c5395d623/C of 0e2c16046f40377cebf7837c5395d623 into 4a0240ca45f143c7a5b671b5669624e5(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
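Every cell being flushed or compacted here follows the pattern test_row_N/<family>:col10 across the three column families A, B and C of TestAcidGuarantees, which is the shape of row the writer threads produce. A rough sketch of such a writer is below; it is not the AcidGuaranteesTestTool source, and the value payload is arbitrary:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WriteRow {
  // Writes the same qualifier into all three column families of one row, which is
  // why each flush above produces one HFile per family for the same sequenceid.
  static void writeRow(Connection conn, int rowIndex, byte[] value) throws Exception {
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_" + rowIndex));
      for (String family : new String[] { "A", "B", "C" }) {
        put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), value);
      }
      table.put(put);   // all three families go in as one atomic row mutation
    }
  }
}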
2024-11-20T17:23:21,696 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:23:21,696 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623., storeName=0e2c16046f40377cebf7837c5395d623/C, priority=13, startTime=1732123400860; duration=0sec 2024-11-20T17:23:21,697 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:21,697 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0e2c16046f40377cebf7837c5395d623:C 2024-11-20T17:23:21,725 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/358278ef8b09461ab0139aaba537de34 2024-11-20T17:23:21,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/bed0fc6032b94552902dfdde0b12bd4b is 50, key is test_row_0/C:col10/1732123399629/Put/seqid=0 2024-11-20T17:23:21,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742320_1496 (size=12301) 2024-11-20T17:23:21,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 0e2c16046f40377cebf7837c5395d623 2024-11-20T17:23:21,754 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 
as already flushing 2024-11-20T17:23:21,754 DEBUG [Thread-1892 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0de9f076 to 127.0.0.1:55266 2024-11-20T17:23:21,755 DEBUG [Thread-1892 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:23:21,755 DEBUG [Thread-1890 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2f142b04 to 127.0.0.1:55266 2024-11-20T17:23:21,755 DEBUG [Thread-1890 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:23:21,762 DEBUG [Thread-1888 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x79b10416 to 127.0.0.1:55266 2024-11-20T17:23:21,762 DEBUG [Thread-1888 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:23:21,763 DEBUG [Thread-1894 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4414259d to 127.0.0.1:55266 2024-11-20T17:23:21,763 DEBUG [Thread-1894 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:23:22,134 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/bed0fc6032b94552902dfdde0b12bd4b 2024-11-20T17:23:22,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/f89dbe6bd5a8480e8c104efe34d93ea9 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/f89dbe6bd5a8480e8c104efe34d93ea9 2024-11-20T17:23:22,140 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/f89dbe6bd5a8480e8c104efe34d93ea9, entries=150, sequenceid=336, filesize=12.0 K 2024-11-20T17:23:22,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/358278ef8b09461ab0139aaba537de34 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/358278ef8b09461ab0139aaba537de34 2024-11-20T17:23:22,143 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/358278ef8b09461ab0139aaba537de34, entries=150, sequenceid=336, filesize=12.0 K 2024-11-20T17:23:22,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/bed0fc6032b94552902dfdde0b12bd4b as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/bed0fc6032b94552902dfdde0b12bd4b 2024-11-20T17:23:22,146 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/bed0fc6032b94552902dfdde0b12bd4b, entries=150, sequenceid=336, filesize=12.0 K 2024-11-20T17:23:22,147 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=26.84 KB/27480 for 0e2c16046f40377cebf7837c5395d623 in 1238ms, sequenceid=336, compaction requested=false 2024-11-20T17:23:22,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2538): Flush status journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:23:22,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:22,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-11-20T17:23:22,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=141 2024-11-20T17:23:22,149 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-11-20T17:23:22,149 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6120 sec 2024-11-20T17:23:22,150 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees in 2.6160 sec 2024-11-20T17:23:23,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T17:23:23,640 INFO [Thread-1898 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-11-20T17:23:30,376 DEBUG [Thread-1896 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7ed69825 to 127.0.0.1:55266 2024-11-20T17:23:30,376 DEBUG [Thread-1896 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:23:30,376 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers:
2024-11-20T17:23:30,376 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 37
2024-11-20T17:23:30,376 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 88
2024-11-20T17:23:30,376 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 71
2024-11-20T17:23:30,376 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 40
2024-11-20T17:23:30,376 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 32
2024-11-20T17:23:30,376 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-11-20T17:23:30,376 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-11-20T17:23:30,376 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2885
2024-11-20T17:23:30,376 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8655 rows
2024-11-20T17:23:30,376 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2886
2024-11-20T17:23:30,376 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8658 rows
2024-11-20T17:23:30,376 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2872
2024-11-20T17:23:30,376 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8613 rows
2024-11-20T17:23:30,376 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2890
2024-11-20T17:23:30,376 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8670 rows
2024-11-20T17:23:30,376 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2874
2024-11-20T17:23:30,376 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8619 rows
2024-11-20T17:23:30,376 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-11-20T17:23:30,376 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4c60eb7d to 127.0.0.1:55266
2024-11-20T17:23:30,376 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T17:23:30,378 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-11-20T17:23:30,379 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-11-20T17:23:30,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=142, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-11-20T17:23:30,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142
2024-11-20T17:23:30,382 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123410382"}]},"ts":"1732123410382"}
2024-11-20T17:23:30,384 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-11-20T17:23:30,386 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-11-20T17:23:30,387 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-11-20T17:23:30,387 INFO [PEWorker-4 {}]
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=0e2c16046f40377cebf7837c5395d623, UNASSIGN}] 2024-11-20T17:23:30,388 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=0e2c16046f40377cebf7837c5395d623, UNASSIGN 2024-11-20T17:23:30,388 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=144 updating hbase:meta row=0e2c16046f40377cebf7837c5395d623, regionState=CLOSING, regionLocation=d514dc944523,40121,1732123262111 2024-11-20T17:23:30,389 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T17:23:30,389 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE; CloseRegionProcedure 0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111}] 2024-11-20T17:23:30,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T17:23:30,490 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T17:23:30,540 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:30,541 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] handler.UnassignRegionHandler(124): Close 0e2c16046f40377cebf7837c5395d623 2024-11-20T17:23:30,541 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T17:23:30,541 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HRegion(1681): Closing 0e2c16046f40377cebf7837c5395d623, disabling compactions & flushes 2024-11-20T17:23:30,541 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:30,541 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 2024-11-20T17:23:30,541 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. after waiting 0 ms 2024-11-20T17:23:30,541 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 
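From 17:23:30 onward the client tears the table down: HBaseAdmin starts the disable, the master stores DisableTableProcedure pid=142, marks the table DISABLING in hbase:meta, and fans out CloseTableRegionsProcedure, TransitRegionStateProcedure and CloseRegionProcedure (pids 143-145) to unassign the single region. A minimal client-side counterpart, assuming only the table name from the log:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public class DisableTable {
  static void disable(Connection conn) throws Exception {
    TableName name = TableName.valueOf("TestAcidGuarantees");
    try (Admin admin = conn.getAdmin()) {
      if (!admin.isTableDisabled(name)) {
        // Drives the DisableTableProcedure seen above (pid=142) and waits until the
        // table's regions are closed; the "Checking to see if procedure is done" lines
        // are the client polling the master for exactly this completion.
        admin.disableTable(name);
      }
    }
  }
}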
2024-11-20T17:23:30,541 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HRegion(2837): Flushing 0e2c16046f40377cebf7837c5395d623 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-20T17:23:30,541 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=A 2024-11-20T17:23:30,541 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:30,541 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=B 2024-11-20T17:23:30,541 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:30,541 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0e2c16046f40377cebf7837c5395d623, store=C 2024-11-20T17:23:30,541 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:30,545 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/48b935a15b6542879a1fdb19a65117ae is 50, key is test_row_0/A:col10/1732123410375/Put/seqid=0 2024-11-20T17:23:30,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742321_1497 (size=12301) 2024-11-20T17:23:30,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T17:23:30,949 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=347 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/48b935a15b6542879a1fdb19a65117ae 2024-11-20T17:23:30,956 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/d46c0524b36f40519adcf01cca323699 is 50, key is test_row_0/B:col10/1732123410375/Put/seqid=0 2024-11-20T17:23:30,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742322_1498 (size=12301) 2024-11-20T17:23:30,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T17:23:31,361 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 
{event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=347 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/d46c0524b36f40519adcf01cca323699 2024-11-20T17:23:31,367 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/a11961be2e7040f0a3372f9c9cb6953f is 50, key is test_row_0/C:col10/1732123410375/Put/seqid=0 2024-11-20T17:23:31,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742323_1499 (size=12301) 2024-11-20T17:23:31,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T17:23:31,771 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=347 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/a11961be2e7040f0a3372f9c9cb6953f 2024-11-20T17:23:31,775 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/A/48b935a15b6542879a1fdb19a65117ae as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/48b935a15b6542879a1fdb19a65117ae 2024-11-20T17:23:31,777 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/48b935a15b6542879a1fdb19a65117ae, entries=150, sequenceid=347, filesize=12.0 K 2024-11-20T17:23:31,778 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/B/d46c0524b36f40519adcf01cca323699 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/d46c0524b36f40519adcf01cca323699 2024-11-20T17:23:31,780 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/d46c0524b36f40519adcf01cca323699, entries=150, sequenceid=347, filesize=12.0 K 2024-11-20T17:23:31,781 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/.tmp/C/a11961be2e7040f0a3372f9c9cb6953f as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/a11961be2e7040f0a3372f9c9cb6953f 2024-11-20T17:23:31,783 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/a11961be2e7040f0a3372f9c9cb6953f, entries=150, sequenceid=347, filesize=12.0 K 2024-11-20T17:23:31,784 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 0e2c16046f40377cebf7837c5395d623 in 1243ms, sequenceid=347, compaction requested=true 2024-11-20T17:23:31,784 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/045cba6be8aa4bbb9a30b16bdea3bd8b, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/3bb9851efeb14c10be47700884927e79, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/7492edb981454cdfb8ffca0d90f35e00, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/c788e5b43fa746afa08dda588898ba41, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/166013e67f734ca8a5c9b625df08439a, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/b8e60b095a3244fe9a02057470591e42, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/64845cdadf2a45feac6a331f176e2372, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/00834fbe3e8746b6a1464196427a96c9, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/b52e6c1d1d4b4eba802018b09973a781, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/55a432ef572d40dca4621915d1abd7d1, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/ee7dcff6f3d5428eacc82687945d4946, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/44e04bc8484d491ebe5bc6be921e44ca, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/1a403d16a842483a811dc554356c8688, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/d5947b4bcb04454db5d4bfaba19cc806, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/69be695c737a476c956c2305cb549a96, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/a37877cf6d9d4ab3bfa7249e44d19b85, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/7459c0d84f394567843ff908293af75a, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/08dc313a0dad4ab893aeb07507bcc354, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/b80705365fd245ce95da52717feeda3d, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/d87934d2b6514affb39a2f12c180ac26, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/1c50de7392a24e638844abf49aa69b52, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/9b207b4eb3484e55b40a87b50843bc31, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/f5fb5ffe41d74b909bed2125a4d33b4a] to archive 2024-11-20T17:23:31,785 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
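A quick check of the flush accounting in the preceding entries: the region reported a total dataSize of ~33.54 KB (34,350 bytes), and each of the three column families reported flushing 11.18 KB, which is consistent with an even split of 34,350 / 3 = 11,450 bytes per family; the reported heapSize of ~88.59 KB likewise matches 90,720 / 1,024, and each family's flush produced one HFile at sequenceid=347 whose 12,301-byte block is reported as filesize=12.0 K.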
2024-11-20T17:23:31,786 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/045cba6be8aa4bbb9a30b16bdea3bd8b to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/045cba6be8aa4bbb9a30b16bdea3bd8b 2024-11-20T17:23:31,787 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/3bb9851efeb14c10be47700884927e79 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/3bb9851efeb14c10be47700884927e79 2024-11-20T17:23:31,788 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/7492edb981454cdfb8ffca0d90f35e00 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/7492edb981454cdfb8ffca0d90f35e00 2024-11-20T17:23:31,788 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/c788e5b43fa746afa08dda588898ba41 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/c788e5b43fa746afa08dda588898ba41 2024-11-20T17:23:31,789 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/166013e67f734ca8a5c9b625df08439a to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/166013e67f734ca8a5c9b625df08439a 2024-11-20T17:23:31,790 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/b8e60b095a3244fe9a02057470591e42 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/b8e60b095a3244fe9a02057470591e42 2024-11-20T17:23:31,791 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/64845cdadf2a45feac6a331f176e2372 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/64845cdadf2a45feac6a331f176e2372 2024-11-20T17:23:31,791 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/00834fbe3e8746b6a1464196427a96c9 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/00834fbe3e8746b6a1464196427a96c9 2024-11-20T17:23:31,792 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/b52e6c1d1d4b4eba802018b09973a781 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/b52e6c1d1d4b4eba802018b09973a781 2024-11-20T17:23:31,793 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/55a432ef572d40dca4621915d1abd7d1 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/55a432ef572d40dca4621915d1abd7d1 2024-11-20T17:23:31,794 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/ee7dcff6f3d5428eacc82687945d4946 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/ee7dcff6f3d5428eacc82687945d4946 2024-11-20T17:23:31,794 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/44e04bc8484d491ebe5bc6be921e44ca to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/44e04bc8484d491ebe5bc6be921e44ca 2024-11-20T17:23:31,795 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/1a403d16a842483a811dc554356c8688 to 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/1a403d16a842483a811dc554356c8688 2024-11-20T17:23:31,796 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/d5947b4bcb04454db5d4bfaba19cc806 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/d5947b4bcb04454db5d4bfaba19cc806 2024-11-20T17:23:31,797 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/69be695c737a476c956c2305cb549a96 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/69be695c737a476c956c2305cb549a96 2024-11-20T17:23:31,797 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/a37877cf6d9d4ab3bfa7249e44d19b85 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/a37877cf6d9d4ab3bfa7249e44d19b85 2024-11-20T17:23:31,798 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/7459c0d84f394567843ff908293af75a to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/7459c0d84f394567843ff908293af75a 2024-11-20T17:23:31,799 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/08dc313a0dad4ab893aeb07507bcc354 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/08dc313a0dad4ab893aeb07507bcc354 2024-11-20T17:23:31,800 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/b80705365fd245ce95da52717feeda3d to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/b80705365fd245ce95da52717feeda3d 2024-11-20T17:23:31,801 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/d87934d2b6514affb39a2f12c180ac26 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/d87934d2b6514affb39a2f12c180ac26 2024-11-20T17:23:31,801 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/1c50de7392a24e638844abf49aa69b52 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/1c50de7392a24e638844abf49aa69b52 2024-11-20T17:23:31,802 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/9b207b4eb3484e55b40a87b50843bc31 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/9b207b4eb3484e55b40a87b50843bc31 2024-11-20T17:23:31,803 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/f5fb5ffe41d74b909bed2125a4d33b4a to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/f5fb5ffe41d74b909bed2125a4d33b4a 2024-11-20T17:23:31,804 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/b2fb209b99654aecbcf1331a8f18d09f, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/8e0de97c490246808c27341c4fdc2039, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/e0cf4df5ece14445888d77638f398831, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/eb04859b6f4842548a02c96f16181b19, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/bcd025597e534dab84eea8f0f795188e, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/51e42221864a4deb85925c68db535dd0, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/af5b43f7037e4ca786db7880c69ac78d, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/f39c3155ae2a42cd85f85d5f8d844aac, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/571bd10e4e7d440cb5ba8dc05af6cdec, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/126bce125320480095820c9127cf9d96, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/892f5a3a66924cdcb22c2f786d2a439e, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/f1955a9485f84a088cb4825d901a00d5, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/3d17deabb50b4a89b5d060c76ffa32fa, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/116def9dbf9c473791a64cc8971cf985, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/6486e7dd110d486c9b412c6091ac91a6, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/ec6249f718dc4a2cb23e530000af125d, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/2050662ea6494106b22116fc73c36f4e, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/ab4156912f7f4d9284fa473892725e29, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/700c1a2f2d5f4626a0e1874431fb2c20, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/40bfa374db6b4b449586ce88de8e1563, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/99594a1622a34e45aa944d685f17d438, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/45068e46c4234261a472a4506b65592e, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/c5d2ccd8cb0f4b36a8f2db36a35ce686] to archive 2024-11-20T17:23:31,804 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
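Every per-file entry in this archiving sequence follows the same pattern: a compacted store file under .../data/default/TestAcidGuarantees/<region>/<family>/ is moved to the identical relative location under .../archive/. The sketch below illustrates that path rewrite for the paths visible in these entries; ArchivePathMapper and toArchivePath are hypothetical names introduced only for illustration and are not part of the HBase API, and the helper assumes the default-namespace layout seen in this log.

// Hypothetical helper illustrating the data/ -> archive/ rewrite seen in the
// HFileArchiver entries above. Not part of the HBase API.
public class ArchivePathMapper {
  static String toArchivePath(String storeFilePath) {
    // e.g. .../6706ebd9-.../data/default/TestAcidGuarantees/<region>/B/<file>
    //  ->  .../6706ebd9-.../archive/data/default/TestAcidGuarantees/<region>/B/<file>
    int idx = storeFilePath.indexOf("/data/default/");
    if (idx < 0) {
      throw new IllegalArgumentException("not a default-namespace store file: " + storeFilePath);
    }
    return storeFilePath.substring(0, idx) + "/archive" + storeFilePath.substring(idx);
  }

  public static void main(String[] args) {
    String src = "hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0"
        + "/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/b2fb209b99654aecbcf1331a8f18d09f";
    // Prints the matching .../archive/data/default/... destination, as in the entries above.
    System.out.println(toArchivePath(src));
  }
}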
2024-11-20T17:23:31,805 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/b2fb209b99654aecbcf1331a8f18d09f to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/b2fb209b99654aecbcf1331a8f18d09f 2024-11-20T17:23:31,806 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/8e0de97c490246808c27341c4fdc2039 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/8e0de97c490246808c27341c4fdc2039 2024-11-20T17:23:31,807 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/e0cf4df5ece14445888d77638f398831 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/e0cf4df5ece14445888d77638f398831 2024-11-20T17:23:31,808 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/eb04859b6f4842548a02c96f16181b19 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/eb04859b6f4842548a02c96f16181b19 2024-11-20T17:23:31,809 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/bcd025597e534dab84eea8f0f795188e to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/bcd025597e534dab84eea8f0f795188e 2024-11-20T17:23:31,809 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/51e42221864a4deb85925c68db535dd0 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/51e42221864a4deb85925c68db535dd0 2024-11-20T17:23:31,810 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/af5b43f7037e4ca786db7880c69ac78d to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/af5b43f7037e4ca786db7880c69ac78d 2024-11-20T17:23:31,811 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/f39c3155ae2a42cd85f85d5f8d844aac to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/f39c3155ae2a42cd85f85d5f8d844aac 2024-11-20T17:23:31,812 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/571bd10e4e7d440cb5ba8dc05af6cdec to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/571bd10e4e7d440cb5ba8dc05af6cdec 2024-11-20T17:23:31,812 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/126bce125320480095820c9127cf9d96 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/126bce125320480095820c9127cf9d96 2024-11-20T17:23:31,813 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/892f5a3a66924cdcb22c2f786d2a439e to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/892f5a3a66924cdcb22c2f786d2a439e 2024-11-20T17:23:31,814 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/f1955a9485f84a088cb4825d901a00d5 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/f1955a9485f84a088cb4825d901a00d5 2024-11-20T17:23:31,814 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/3d17deabb50b4a89b5d060c76ffa32fa to 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/3d17deabb50b4a89b5d060c76ffa32fa 2024-11-20T17:23:31,815 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/116def9dbf9c473791a64cc8971cf985 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/116def9dbf9c473791a64cc8971cf985 2024-11-20T17:23:31,816 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/6486e7dd110d486c9b412c6091ac91a6 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/6486e7dd110d486c9b412c6091ac91a6 2024-11-20T17:23:31,817 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/ec6249f718dc4a2cb23e530000af125d to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/ec6249f718dc4a2cb23e530000af125d 2024-11-20T17:23:31,817 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/2050662ea6494106b22116fc73c36f4e to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/2050662ea6494106b22116fc73c36f4e 2024-11-20T17:23:31,818 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/ab4156912f7f4d9284fa473892725e29 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/ab4156912f7f4d9284fa473892725e29 2024-11-20T17:23:31,819 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/700c1a2f2d5f4626a0e1874431fb2c20 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/700c1a2f2d5f4626a0e1874431fb2c20 2024-11-20T17:23:31,820 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/40bfa374db6b4b449586ce88de8e1563 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/40bfa374db6b4b449586ce88de8e1563 2024-11-20T17:23:31,820 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/99594a1622a34e45aa944d685f17d438 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/99594a1622a34e45aa944d685f17d438 2024-11-20T17:23:31,821 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/45068e46c4234261a472a4506b65592e to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/45068e46c4234261a472a4506b65592e 2024-11-20T17:23:31,822 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/c5d2ccd8cb0f4b36a8f2db36a35ce686 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/c5d2ccd8cb0f4b36a8f2db36a35ce686 2024-11-20T17:23:31,823 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/5c4a6daec5b848208b3a2c9f3acf0c8c, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/62b66cb9f6df4282af5118bb7c9f8048, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/342b25c854994e18a62e90ad14bc3205, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/23441a4ab41d43a9abe57e1d2fe519fe, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/9267d789e57348c993000381b38be58e, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/741e782e63b74fe0a64126b5606046a6, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/b3fb30ee84e24412a785428bbbff98d0, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/331a1b1857ac45a0aa03d60361a0c187, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/9cc6749750d34f62bb5b89e08fb66e97, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/d7304a2fb97e47ea8da6efeaf3b8361e, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/0e6df415fb0b42b38789251b6f748b1f, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/85f3e5c460f4462093122a62dd7ec795, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/8f96876136b04a22976c493c9715d29a, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/d26e93f616f245a3b64c6091026e864e, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/4884efb449e54c019849e1b37cd6ba12, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/dcc8618985074e96964cf52cf5ef343b, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/32fb14e8413d439a9af5a397cc9d259b, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/dd03f6d87a28492a891d20e18cf3bee1, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/403f0efd465a4dd584bebb7fe177c361, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/bcd6f1a38405442b99489915a79139cf, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/df3d5af80b534965b98a0e2ebfebb307, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/936458abf50d49f7bc242350db6358fd, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/6932eb5f5dee412fbc2916832651fe4e] to archive 2024-11-20T17:23:31,824 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T17:23:31,825 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/5c4a6daec5b848208b3a2c9f3acf0c8c to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/5c4a6daec5b848208b3a2c9f3acf0c8c 2024-11-20T17:23:31,826 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/62b66cb9f6df4282af5118bb7c9f8048 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/62b66cb9f6df4282af5118bb7c9f8048 2024-11-20T17:23:31,826 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/342b25c854994e18a62e90ad14bc3205 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/342b25c854994e18a62e90ad14bc3205 2024-11-20T17:23:31,827 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/23441a4ab41d43a9abe57e1d2fe519fe to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/23441a4ab41d43a9abe57e1d2fe519fe 2024-11-20T17:23:31,828 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/9267d789e57348c993000381b38be58e to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/9267d789e57348c993000381b38be58e 2024-11-20T17:23:31,829 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/741e782e63b74fe0a64126b5606046a6 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/741e782e63b74fe0a64126b5606046a6 2024-11-20T17:23:31,830 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/b3fb30ee84e24412a785428bbbff98d0 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/b3fb30ee84e24412a785428bbbff98d0 2024-11-20T17:23:31,830 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/331a1b1857ac45a0aa03d60361a0c187 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/331a1b1857ac45a0aa03d60361a0c187 2024-11-20T17:23:31,831 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/9cc6749750d34f62bb5b89e08fb66e97 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/9cc6749750d34f62bb5b89e08fb66e97 2024-11-20T17:23:31,832 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/d7304a2fb97e47ea8da6efeaf3b8361e to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/d7304a2fb97e47ea8da6efeaf3b8361e 2024-11-20T17:23:31,833 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/0e6df415fb0b42b38789251b6f748b1f to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/0e6df415fb0b42b38789251b6f748b1f 2024-11-20T17:23:31,833 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/85f3e5c460f4462093122a62dd7ec795 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/85f3e5c460f4462093122a62dd7ec795 2024-11-20T17:23:31,834 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/8f96876136b04a22976c493c9715d29a to 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/8f96876136b04a22976c493c9715d29a 2024-11-20T17:23:31,835 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/d26e93f616f245a3b64c6091026e864e to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/d26e93f616f245a3b64c6091026e864e 2024-11-20T17:23:31,836 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/4884efb449e54c019849e1b37cd6ba12 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/4884efb449e54c019849e1b37cd6ba12 2024-11-20T17:23:31,836 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/dcc8618985074e96964cf52cf5ef343b to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/dcc8618985074e96964cf52cf5ef343b 2024-11-20T17:23:31,837 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/32fb14e8413d439a9af5a397cc9d259b to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/32fb14e8413d439a9af5a397cc9d259b 2024-11-20T17:23:31,838 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/dd03f6d87a28492a891d20e18cf3bee1 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/dd03f6d87a28492a891d20e18cf3bee1 2024-11-20T17:23:31,839 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/403f0efd465a4dd584bebb7fe177c361 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/403f0efd465a4dd584bebb7fe177c361 2024-11-20T17:23:31,840 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/bcd6f1a38405442b99489915a79139cf to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/bcd6f1a38405442b99489915a79139cf 2024-11-20T17:23:31,840 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/df3d5af80b534965b98a0e2ebfebb307 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/df3d5af80b534965b98a0e2ebfebb307 2024-11-20T17:23:31,841 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/936458abf50d49f7bc242350db6358fd to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/936458abf50d49f7bc242350db6358fd 2024-11-20T17:23:31,842 DEBUG [StoreCloser-TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/6932eb5f5dee412fbc2916832651fe4e to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/6932eb5f5dee412fbc2916832651fe4e 2024-11-20T17:23:31,845 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/recovered.edits/350.seqid, newMaxSeqId=350, maxSeqId=1 2024-11-20T17:23:31,846 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623. 
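With the final flush committed, the compacted files archived, and the recovered.edits/350.seqid marker written, the region is now closed; the entries that follow show the disable procedure (pid=142) completing and the test client then asking the master to delete the table, which runs as DeleteTableProcedure pid=146 and archives the region directory. A minimal sketch of that follow-up client call is shown below, using the standard Admin API; it is illustrative only, and the disabled-state check reflects the fact that HBase only allows deletion of a table that has already been disabled, which is why the delete request appears after the DISABLE operation reports completion.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteTableExample {
  public static void main(String[] args) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      if (admin.isTableDisabled(table)) {  // HBase only deletes tables that are disabled
        // Runs as a DeleteTableProcedure on the master (pid=146 in this log): the region
        // directory is archived and the table is removed from hbase:meta.
        admin.deleteTable(table);
      }
    }
  }
}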
2024-11-20T17:23:31,846 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HRegion(1635): Region close journal for 0e2c16046f40377cebf7837c5395d623: 2024-11-20T17:23:31,847 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] handler.UnassignRegionHandler(170): Closed 0e2c16046f40377cebf7837c5395d623 2024-11-20T17:23:31,848 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=144 updating hbase:meta row=0e2c16046f40377cebf7837c5395d623, regionState=CLOSED 2024-11-20T17:23:31,850 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-11-20T17:23:31,850 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; CloseRegionProcedure 0e2c16046f40377cebf7837c5395d623, server=d514dc944523,40121,1732123262111 in 1.4600 sec 2024-11-20T17:23:31,851 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=144, resume processing ppid=143 2024-11-20T17:23:31,851 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, ppid=143, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=0e2c16046f40377cebf7837c5395d623, UNASSIGN in 1.4630 sec 2024-11-20T17:23:31,852 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-11-20T17:23:31,852 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4650 sec 2024-11-20T17:23:31,853 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123411853"}]},"ts":"1732123411853"} 2024-11-20T17:23:31,854 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T17:23:31,856 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T17:23:31,857 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.4770 sec 2024-11-20T17:23:31,873 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-20T17:23:32,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T17:23:32,485 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 142 completed 2024-11-20T17:23:32,486 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T17:23:32,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=146, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:23:32,487 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=146, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:23:32,488 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-20T17:23:32,488 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=146, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:23:32,489 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623 2024-11-20T17:23:32,490 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A, FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B, FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C, FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/recovered.edits] 2024-11-20T17:23:32,493 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/10d6014e51d0479b823e0cae2d6f7fac to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/10d6014e51d0479b823e0cae2d6f7fac 2024-11-20T17:23:32,494 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/48b935a15b6542879a1fdb19a65117ae to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/48b935a15b6542879a1fdb19a65117ae 2024-11-20T17:23:32,494 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/f89dbe6bd5a8480e8c104efe34d93ea9 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/A/f89dbe6bd5a8480e8c104efe34d93ea9 2024-11-20T17:23:32,496 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/3146314d208646f3a24e51e18ef3dea1 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/3146314d208646f3a24e51e18ef3dea1 2024-11-20T17:23:32,497 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/358278ef8b09461ab0139aaba537de34 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/358278ef8b09461ab0139aaba537de34 2024-11-20T17:23:32,498 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/d46c0524b36f40519adcf01cca323699 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/B/d46c0524b36f40519adcf01cca323699 2024-11-20T17:23:32,499 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/4a0240ca45f143c7a5b671b5669624e5 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/4a0240ca45f143c7a5b671b5669624e5 2024-11-20T17:23:32,500 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/a11961be2e7040f0a3372f9c9cb6953f to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/a11961be2e7040f0a3372f9c9cb6953f 2024-11-20T17:23:32,501 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/bed0fc6032b94552902dfdde0b12bd4b to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/C/bed0fc6032b94552902dfdde0b12bd4b 2024-11-20T17:23:32,503 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/recovered.edits/350.seqid to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623/recovered.edits/350.seqid 2024-11-20T17:23:32,504 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/0e2c16046f40377cebf7837c5395d623 2024-11-20T17:23:32,504 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T17:23:32,506 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=146, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:23:32,507 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T17:23:32,509 DEBUG [PEWorker-5 {}] 
procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T17:23:32,509 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=146, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:23:32,509 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T17:23:32,509 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732123412509"}]},"ts":"9223372036854775807"} 2024-11-20T17:23:32,511 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T17:23:32,511 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 0e2c16046f40377cebf7837c5395d623, NAME => 'TestAcidGuarantees,,1732123377737.0e2c16046f40377cebf7837c5395d623.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T17:23:32,511 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-20T17:23:32,511 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732123412511"}]},"ts":"9223372036854775807"} 2024-11-20T17:23:32,512 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T17:23:32,515 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=146, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:23:32,516 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 29 msec 2024-11-20T17:23:32,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-20T17:23:32,588 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 146 completed 2024-11-20T17:23:32,598 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testScanAtomicity Thread=238 (was 239), OpenFileDescriptor=443 (was 450), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=267 (was 288), ProcessCount=11 (was 11), AvailableMemoryMB=6141 (was 6139) - AvailableMemoryMB LEAK? - 2024-11-20T17:23:32,606 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobGetAtomicity Thread=238, OpenFileDescriptor=443, MaxFileDescriptor=1048576, SystemLoadAverage=267, ProcessCount=11, AvailableMemoryMB=6141 2024-11-20T17:23:32,607 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
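The HBaseAdmin$TableFuture entries just above record the client side of this teardown: a DISABLE of default:TestAcidGuarantees (procId 142) followed by a DELETE (procId 146). A minimal, hedged sketch of driving the same sequence through the public Admin API is shown below; it assumes standard hbase-client 2.x classes and a default HBaseConfiguration, and the class name is illustrative rather than taken from the test source:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            if (admin.tableExists(table)) {
                // A table must be disabled before it can be deleted; these two calls
                // correspond to the DISABLE and DELETE procedures recorded in the log.
                if (admin.isTableEnabled(table)) {
                    admin.disableTable(table);
                }
                admin.deleteTable(table);
            }
        }
    }
}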
2024-11-20T17:23:32,607 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T17:23:32,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=147, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T17:23:32,608 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T17:23:32,609 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:23:32,609 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 147 2024-11-20T17:23:32,609 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T17:23:32,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-11-20T17:23:32,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742324_1500 (size=960) 2024-11-20T17:23:32,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-11-20T17:23:32,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-11-20T17:23:33,016 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0 2024-11-20T17:23:33,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742325_1501 (size=53) 2024-11-20T17:23:33,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-11-20T17:23:33,421 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:23:33,421 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 47d6ffc4e6051fed3b2d9d69f1d3f3c0, disabling compactions & flushes 2024-11-20T17:23:33,421 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:33,421 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:33,421 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. after waiting 0 ms 2024-11-20T17:23:33,421 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:33,421 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 
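The create request logged at 17:23:32,607 spells out the schema this test works against: three column families A, B and C with a single version each, the BASIC compacting memstore type as a table attribute, and the deliberately small memstore flush size (131072 bytes) that TableDescriptorChecker warns about. A hedged sketch of building an equivalent descriptor with the hbase-client 2.x builder API follows; only the values named in the log are taken from it, everything else (class name, omitted connection setup) is an assumption:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
    static ColumnFamilyDescriptor family(String name) {
        // VERSIONS => '1' in the logged descriptor; remaining attributes stay at defaults.
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
                .setMaxVersions(1)
                .build();
    }

    static TableDescriptor descriptor() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                // Matches METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}.
                .setValue("hbase.hregion.compacting.memstore.type", "BASIC")
                // 131072 bytes, the value TableDescriptorChecker flags as very small.
                .setMemStoreFlushSize(131072)
                .setColumnFamily(family("A"))
                .setColumnFamily(family("B"))
                .setColumnFamily(family("C"))
                .build();
    }

    static void create(Admin admin) throws IOException {
        // Corresponds to the CreateTableProcedure (pid=147) in the log.
        admin.createTable(descriptor());
    }
}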
2024-11-20T17:23:33,421 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 47d6ffc4e6051fed3b2d9d69f1d3f3c0: 2024-11-20T17:23:33,422 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T17:23:33,422 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732123413422"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732123413422"}]},"ts":"1732123413422"} 2024-11-20T17:23:33,423 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T17:23:33,424 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T17:23:33,424 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123413424"}]},"ts":"1732123413424"} 2024-11-20T17:23:33,425 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T17:23:33,429 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=47d6ffc4e6051fed3b2d9d69f1d3f3c0, ASSIGN}] 2024-11-20T17:23:33,429 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=47d6ffc4e6051fed3b2d9d69f1d3f3c0, ASSIGN 2024-11-20T17:23:33,430 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=47d6ffc4e6051fed3b2d9d69f1d3f3c0, ASSIGN; state=OFFLINE, location=d514dc944523,40121,1732123262111; forceNewPlan=false, retain=false 2024-11-20T17:23:33,580 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=47d6ffc4e6051fed3b2d9d69f1d3f3c0, regionState=OPENING, regionLocation=d514dc944523,40121,1732123262111 2024-11-20T17:23:33,581 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE; OpenRegionProcedure 47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111}] 2024-11-20T17:23:33,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-11-20T17:23:33,733 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:33,735 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 
2024-11-20T17:23:33,735 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(7285): Opening region: {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} 2024-11-20T17:23:33,735 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:33,735 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:23:33,736 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(7327): checking encryption for 47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:33,736 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(7330): checking classloading for 47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:33,737 INFO [StoreOpener-47d6ffc4e6051fed3b2d9d69f1d3f3c0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:33,738 INFO [StoreOpener-47d6ffc4e6051fed3b2d9d69f1d3f3c0-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:23:33,738 INFO [StoreOpener-47d6ffc4e6051fed3b2d9d69f1d3f3c0-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 47d6ffc4e6051fed3b2d9d69f1d3f3c0 columnFamilyName A 2024-11-20T17:23:33,738 DEBUG [StoreOpener-47d6ffc4e6051fed3b2d9d69f1d3f3c0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:23:33,738 INFO [StoreOpener-47d6ffc4e6051fed3b2d9d69f1d3f3c0-1 {}] regionserver.HStore(327): Store=47d6ffc4e6051fed3b2d9d69f1d3f3c0/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:23:33,738 INFO [StoreOpener-47d6ffc4e6051fed3b2d9d69f1d3f3c0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:33,739 INFO [StoreOpener-47d6ffc4e6051fed3b2d9d69f1d3f3c0-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:23:33,739 INFO [StoreOpener-47d6ffc4e6051fed3b2d9d69f1d3f3c0-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 47d6ffc4e6051fed3b2d9d69f1d3f3c0 columnFamilyName B 2024-11-20T17:23:33,739 DEBUG [StoreOpener-47d6ffc4e6051fed3b2d9d69f1d3f3c0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:23:33,739 INFO [StoreOpener-47d6ffc4e6051fed3b2d9d69f1d3f3c0-1 {}] regionserver.HStore(327): Store=47d6ffc4e6051fed3b2d9d69f1d3f3c0/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:23:33,739 INFO [StoreOpener-47d6ffc4e6051fed3b2d9d69f1d3f3c0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:33,740 INFO [StoreOpener-47d6ffc4e6051fed3b2d9d69f1d3f3c0-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:23:33,740 INFO [StoreOpener-47d6ffc4e6051fed3b2d9d69f1d3f3c0-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 47d6ffc4e6051fed3b2d9d69f1d3f3c0 columnFamilyName C 2024-11-20T17:23:33,740 DEBUG [StoreOpener-47d6ffc4e6051fed3b2d9d69f1d3f3c0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:23:33,741 INFO [StoreOpener-47d6ffc4e6051fed3b2d9d69f1d3f3c0-1 {}] regionserver.HStore(327): Store=47d6ffc4e6051fed3b2d9d69f1d3f3c0/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:23:33,741 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:33,741 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:33,741 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:33,742 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T17:23:33,743 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(1085): writing seq id for 47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:33,744 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T17:23:33,745 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(1102): Opened 47d6ffc4e6051fed3b2d9d69f1d3f3c0; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72570124, jitterRate=0.08137911558151245}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T17:23:33,745 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(1001): Region open journal for 47d6ffc4e6051fed3b2d9d69f1d3f3c0: 2024-11-20T17:23:33,746 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0., pid=149, masterSystemTime=1732123413732 2024-11-20T17:23:33,747 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:33,747 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 
2024-11-20T17:23:33,747 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=47d6ffc4e6051fed3b2d9d69f1d3f3c0, regionState=OPEN, openSeqNum=2, regionLocation=d514dc944523,40121,1732123262111 2024-11-20T17:23:33,749 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=148 2024-11-20T17:23:33,749 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=148, state=SUCCESS; OpenRegionProcedure 47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 in 167 msec 2024-11-20T17:23:33,750 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=148, resume processing ppid=147 2024-11-20T17:23:33,750 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, ppid=147, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=47d6ffc4e6051fed3b2d9d69f1d3f3c0, ASSIGN in 321 msec 2024-11-20T17:23:33,751 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T17:23:33,751 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123413751"}]},"ts":"1732123413751"} 2024-11-20T17:23:33,752 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T17:23:33,754 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T17:23:33,755 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1470 sec 2024-11-20T17:23:34,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-11-20T17:23:34,713 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 147 completed 2024-11-20T17:23:34,715 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6d9954b7 to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3fb684eb 2024-11-20T17:23:34,721 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@537a66f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:23:34,722 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:23:34,724 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46352, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:23:34,725 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T17:23:34,725 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54682, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T17:23:34,727 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-20T17:23:34,727 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T17:23:34,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=150, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-20T17:23:34,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742326_1502 (size=996) 2024-11-20T17:23:35,137 DEBUG [PEWorker-4 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-20T17:23:35,137 INFO [PEWorker-4 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-20T17:23:35,139 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T17:23:35,140 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=47d6ffc4e6051fed3b2d9d69f1d3f3c0, REOPEN/MOVE}] 2024-11-20T17:23:35,141 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=47d6ffc4e6051fed3b2d9d69f1d3f3c0, REOPEN/MOVE 2024-11-20T17:23:35,141 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=152 updating hbase:meta row=47d6ffc4e6051fed3b2d9d69f1d3f3c0, regionState=CLOSING, regionLocation=d514dc944523,40121,1732123262111 2024-11-20T17:23:35,142 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T17:23:35,142 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=153, ppid=152, state=RUNNABLE; CloseRegionProcedure 47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111}] 2024-11-20T17:23:35,293 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:35,294 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] handler.UnassignRegionHandler(124): Close 47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:35,294 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T17:23:35,294 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1681): Closing 47d6ffc4e6051fed3b2d9d69f1d3f3c0, disabling compactions & flushes 2024-11-20T17:23:35,294 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:35,294 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:35,294 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. after waiting 0 ms 2024-11-20T17:23:35,294 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 
2024-11-20T17:23:35,297 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-20T17:23:35,298 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:35,298 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1635): Region close journal for 47d6ffc4e6051fed3b2d9d69f1d3f3c0: 2024-11-20T17:23:35,298 WARN [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegionServer(3786): Not adding moved region record: 47d6ffc4e6051fed3b2d9d69f1d3f3c0 to self. 2024-11-20T17:23:35,299 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] handler.UnassignRegionHandler(170): Closed 47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:35,299 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=152 updating hbase:meta row=47d6ffc4e6051fed3b2d9d69f1d3f3c0, regionState=CLOSED 2024-11-20T17:23:35,301 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=153, resume processing ppid=152 2024-11-20T17:23:35,301 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; CloseRegionProcedure 47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 in 158 msec 2024-11-20T17:23:35,302 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=47d6ffc4e6051fed3b2d9d69f1d3f3c0, REOPEN/MOVE; state=CLOSED, location=d514dc944523,40121,1732123262111; forceNewPlan=false, retain=true 2024-11-20T17:23:35,452 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=152 updating hbase:meta row=47d6ffc4e6051fed3b2d9d69f1d3f3c0, regionState=OPENING, regionLocation=d514dc944523,40121,1732123262111 2024-11-20T17:23:35,453 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=154, ppid=152, state=RUNNABLE; OpenRegionProcedure 47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111}] 2024-11-20T17:23:35,605 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:35,607 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 
2024-11-20T17:23:35,607 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7285): Opening region: {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} 2024-11-20T17:23:35,607 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:35,607 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:23:35,607 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7327): checking encryption for 47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:35,607 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7330): checking classloading for 47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:35,608 INFO [StoreOpener-47d6ffc4e6051fed3b2d9d69f1d3f3c0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:35,609 INFO [StoreOpener-47d6ffc4e6051fed3b2d9d69f1d3f3c0-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:23:35,609 INFO [StoreOpener-47d6ffc4e6051fed3b2d9d69f1d3f3c0-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 47d6ffc4e6051fed3b2d9d69f1d3f3c0 columnFamilyName A 2024-11-20T17:23:35,610 DEBUG [StoreOpener-47d6ffc4e6051fed3b2d9d69f1d3f3c0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:23:35,610 INFO [StoreOpener-47d6ffc4e6051fed3b2d9d69f1d3f3c0-1 {}] regionserver.HStore(327): Store=47d6ffc4e6051fed3b2d9d69f1d3f3c0/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:23:35,611 INFO [StoreOpener-47d6ffc4e6051fed3b2d9d69f1d3f3c0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:35,611 INFO [StoreOpener-47d6ffc4e6051fed3b2d9d69f1d3f3c0-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:23:35,611 INFO [StoreOpener-47d6ffc4e6051fed3b2d9d69f1d3f3c0-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 47d6ffc4e6051fed3b2d9d69f1d3f3c0 columnFamilyName B 2024-11-20T17:23:35,611 DEBUG [StoreOpener-47d6ffc4e6051fed3b2d9d69f1d3f3c0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:23:35,611 INFO [StoreOpener-47d6ffc4e6051fed3b2d9d69f1d3f3c0-1 {}] regionserver.HStore(327): Store=47d6ffc4e6051fed3b2d9d69f1d3f3c0/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:23:35,612 INFO [StoreOpener-47d6ffc4e6051fed3b2d9d69f1d3f3c0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:35,612 INFO [StoreOpener-47d6ffc4e6051fed3b2d9d69f1d3f3c0-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:23:35,612 INFO [StoreOpener-47d6ffc4e6051fed3b2d9d69f1d3f3c0-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 47d6ffc4e6051fed3b2d9d69f1d3f3c0 columnFamilyName C 2024-11-20T17:23:35,612 DEBUG [StoreOpener-47d6ffc4e6051fed3b2d9d69f1d3f3c0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:23:35,612 INFO [StoreOpener-47d6ffc4e6051fed3b2d9d69f1d3f3c0-1 {}] regionserver.HStore(327): Store=47d6ffc4e6051fed3b2d9d69f1d3f3c0/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:23:35,612 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:35,613 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:35,614 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:35,615 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T17:23:35,616 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1085): writing seq id for 47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:35,616 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1102): Opened 47d6ffc4e6051fed3b2d9d69f1d3f3c0; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64144677, jitterRate=-0.0441698282957077}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T17:23:35,617 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1001): Region open journal for 47d6ffc4e6051fed3b2d9d69f1d3f3c0: 2024-11-20T17:23:35,617 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0., pid=154, masterSystemTime=1732123415604 2024-11-20T17:23:35,618 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:35,619 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 
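The modify request logged at 17:23:34,727 changes only column family A, turning on MOB storage (IS_MOB => 'true') with MOB_THRESHOLD => '4' so that any value larger than 4 bytes is written as a MOB cell; the ReopenTableRegionsProcedure above then reopens the region in place to pick up the new descriptor. A hedged sketch of the corresponding client call using the standard 2.x builders (connection handling omitted; only the MOB attributes and table name come from the log, the rest is illustrative):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobSketch {
    static void enableMobOnA(Admin admin) throws IOException {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        TableDescriptor current = admin.getDescriptor(table);
        TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
                .modifyColumnFamily(ColumnFamilyDescriptorBuilder
                        .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
                        .setMobEnabled(true)   // IS_MOB => 'true'
                        .setMobThreshold(4)    // MOB_THRESHOLD => '4' (bytes)
                        .build())
                .build();
        // Triggers the ModifyTableProcedure / ReopenTableRegionsProcedure seen in the log.
        admin.modifyTable(modified);
    }
}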
2024-11-20T17:23:35,619 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=152 updating hbase:meta row=47d6ffc4e6051fed3b2d9d69f1d3f3c0, regionState=OPEN, openSeqNum=5, regionLocation=d514dc944523,40121,1732123262111 2024-11-20T17:23:35,620 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=154, resume processing ppid=152 2024-11-20T17:23:35,620 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, ppid=152, state=SUCCESS; OpenRegionProcedure 47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 in 167 msec 2024-11-20T17:23:35,621 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=152, resume processing ppid=151 2024-11-20T17:23:35,621 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, ppid=151, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=47d6ffc4e6051fed3b2d9d69f1d3f3c0, REOPEN/MOVE in 481 msec 2024-11-20T17:23:35,623 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=150 2024-11-20T17:23:35,623 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=150, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 483 msec 2024-11-20T17:23:35,624 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 896 msec 2024-11-20T17:23:35,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-20T17:23:35,626 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0d5efb7a to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@644b7e6 2024-11-20T17:23:35,629 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6094c70, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:23:35,630 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7fc332d8 to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5c9b5141 2024-11-20T17:23:35,632 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@103dfc6e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:23:35,633 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x17327621 to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@11a52cdf 2024-11-20T17:23:35,636 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e047c09, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:23:35,636 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1584f18a 
to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2d7fe431 2024-11-20T17:23:35,639 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60d631a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:23:35,639 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5b914bf4 to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@91d72db 2024-11-20T17:23:35,642 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58971172, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:23:35,642 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5d836f78 to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3d7fe93b 2024-11-20T17:23:35,645 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7846cb78, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:23:35,645 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x53305d9b to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@11c440f7 2024-11-20T17:23:35,648 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f1754bc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:23:35,648 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6bb6288a to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@58460ef3 2024-11-20T17:23:35,651 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d9113f3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:23:35,651 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x06556601 to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6e8cd1ae 2024-11-20T17:23:35,654 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bb75907, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:23:35,654 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x458a85fd to 127.0.0.1:55266 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4d832d43 2024-11-20T17:23:35,658 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c1d3a95, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:23:35,664 DEBUG [hconnection-0x662357c7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:23:35,665 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46366, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:23:35,665 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:23:35,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=155, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees 2024-11-20T17:23:35,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-20T17:23:35,666 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=155, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:23:35,667 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=155, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:23:35,667 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:23:35,673 DEBUG [hconnection-0x226f705c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:23:35,673 DEBUG [hconnection-0x3b8481e3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:23:35,673 DEBUG [hconnection-0x416f9560-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:23:35,674 DEBUG [hconnection-0x6ae177cd-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:23:35,674 DEBUG [hconnection-0x76c498a9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:23:35,674 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46374, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:23:35,674 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46368, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins 
(auth:SIMPLE), service=ClientService 2024-11-20T17:23:35,674 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46376, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:23:35,675 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46388, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:23:35,675 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46396, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:23:35,676 DEBUG [hconnection-0x515c1bab-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:23:35,676 DEBUG [hconnection-0x1bafd3f2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:23:35,677 DEBUG [hconnection-0x7d54c60-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:23:35,677 DEBUG [hconnection-0xd0fa45b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:23:35,677 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46408, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:23:35,677 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46400, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:23:35,678 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46426, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:23:35,680 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46412, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:23:35,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:35,682 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 47d6ffc4e6051fed3b2d9d69f1d3f3c0 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T17:23:35,684 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=A 2024-11-20T17:23:35,684 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:35,684 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=B 2024-11-20T17:23:35,684 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:35,684 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=C 2024-11-20T17:23:35,684 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:35,700 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding 
memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:35,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123475698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:35,701 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:35,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123475698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:35,702 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:35,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46396 deadline: 1732123475700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:35,703 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:35,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123475700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:35,703 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:35,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123475701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:35,708 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112023491bce2bb241e9bc33898113e03a41_47d6ffc4e6051fed3b2d9d69f1d3f3c0 is 50, key is test_row_0/A:col10/1732123415680/Put/seqid=0 2024-11-20T17:23:35,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742327_1503 (size=12154) 2024-11-20T17:23:35,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-20T17:23:35,802 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:35,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123475801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:35,803 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:35,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123475802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:35,804 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:35,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46396 deadline: 1732123475803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:35,805 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:35,805 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:35,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123475804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:35,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123475804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:35,818 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:35,819 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-20T17:23:35,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:35,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:35,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:35,819 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:35,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:35,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:35,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-20T17:23:35,971 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:35,971 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-20T17:23:35,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:35,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:35,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:35,972 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:35,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:35,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:36,004 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:36,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123476004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:36,006 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:36,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123476005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:36,006 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:36,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46396 deadline: 1732123476005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:36,006 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:36,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123476006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:36,008 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:36,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123476007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:36,114 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:23:36,117 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112023491bce2bb241e9bc33898113e03a41_47d6ffc4e6051fed3b2d9d69f1d3f3c0 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112023491bce2bb241e9bc33898113e03a41_47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:36,118 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/6df64848a39149febbfae78ade10dafc, store: [table=TestAcidGuarantees family=A region=47d6ffc4e6051fed3b2d9d69f1d3f3c0] 2024-11-20T17:23:36,119 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/6df64848a39149febbfae78ade10dafc is 175, key is test_row_0/A:col10/1732123415680/Put/seqid=0 2024-11-20T17:23:36,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742328_1504 (size=30955) 2024-11-20T17:23:36,123 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=17, memsize=22.4 
K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/6df64848a39149febbfae78ade10dafc 2024-11-20T17:23:36,124 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:36,124 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-20T17:23:36,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:36,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:36,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:36,125 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:36,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:36,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:23:36,143 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/09ed176a672a44e5b73b615605ae3f55 is 50, key is test_row_0/B:col10/1732123415680/Put/seqid=0 2024-11-20T17:23:36,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742329_1505 (size=12001) 2024-11-20T17:23:36,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-20T17:23:36,276 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:36,277 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-20T17:23:36,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:36,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:36,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:36,277 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:36,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:36,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:36,308 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:36,308 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:36,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123476306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:36,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123476307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:36,308 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:36,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123476307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:36,309 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:36,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46396 deadline: 1732123476308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:36,310 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:36,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123476310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:36,429 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:36,430 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-20T17:23:36,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:36,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:36,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:36,430 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:23:36,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:36,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:36,547 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/09ed176a672a44e5b73b615605ae3f55 2024-11-20T17:23:36,570 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/681ca112de9445d0adc087a2df6910a1 is 50, key is test_row_0/C:col10/1732123415680/Put/seqid=0 2024-11-20T17:23:36,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742330_1506 (size=12001) 2024-11-20T17:23:36,582 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:36,582 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-20T17:23:36,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:36,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:36,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:36,583 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:23:36,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:36,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:36,735 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:36,735 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-20T17:23:36,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:36,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:36,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:36,735 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:36,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:36,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:36,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-20T17:23:36,810 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:36,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123476809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:36,811 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:36,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123476810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:36,813 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:36,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123476812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:36,813 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:36,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123476812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:36,813 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:36,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46396 deadline: 1732123476812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:36,888 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:36,888 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-20T17:23:36,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 
2024-11-20T17:23:36,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:36,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:36,888 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:36,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:23:36,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:23:36,977 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/681ca112de9445d0adc087a2df6910a1
2024-11-20T17:23:36,981 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/6df64848a39149febbfae78ade10dafc as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/6df64848a39149febbfae78ade10dafc
2024-11-20T17:23:36,984 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/6df64848a39149febbfae78ade10dafc, entries=150, sequenceid=17, filesize=30.2 K
2024-11-20T17:23:36,985 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/09ed176a672a44e5b73b615605ae3f55 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/09ed176a672a44e5b73b615605ae3f55
2024-11-20T17:23:36,989 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/09ed176a672a44e5b73b615605ae3f55, entries=150, sequenceid=17, filesize=11.7 K
2024-11-20T17:23:36,989 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/681ca112de9445d0adc087a2df6910a1 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/681ca112de9445d0adc087a2df6910a1
2024-11-20T17:23:36,992 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/681ca112de9445d0adc087a2df6910a1, entries=150, sequenceid=17, filesize=11.7 K
2024-11-20T17:23:36,993 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=147.60 KB/151140 for 47d6ffc4e6051fed3b2d9d69f1d3f3c0 in 1311ms, sequenceid=17, compaction requested=false
2024-11-20T17:23:36,993 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 47d6ffc4e6051fed3b2d9d69f1d3f3c0:
2024-11-20T17:23:37,041 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111
2024-11-20T17:23:37,041 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156
2024-11-20T17:23:37,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.
2024-11-20T17:23:37,041 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2837): Flushing 47d6ffc4e6051fed3b2d9d69f1d3f3c0 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB
2024-11-20T17:23:37,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=A
2024-11-20T17:23:37,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T17:23:37,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=B
2024-11-20T17:23:37,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T17:23:37,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=C
2024-11-20T17:23:37,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T17:23:37,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120aeeb15f3e7844e21862c2b35f39d76ea_47d6ffc4e6051fed3b2d9d69f1d3f3c0 is 50, key is test_row_0/A:col10/1732123415698/Put/seqid=0
2024-11-20T17:23:37,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742331_1507 (size=12154)
2024-11-20T17:23:37,368 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties
2024-11-20T17:23:37,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:23:37,455 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120aeeb15f3e7844e21862c2b35f39d76ea_47d6ffc4e6051fed3b2d9d69f1d3f3c0 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120aeeb15f3e7844e21862c2b35f39d76ea_47d6ffc4e6051fed3b2d9d69f1d3f3c0
2024-11-20T17:23:37,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/5a1e5c5217434ac69eed00f40a7f08d3, store: [table=TestAcidGuarantees family=A region=47d6ffc4e6051fed3b2d9d69f1d3f3c0]
2024-11-20T17:23:37,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/5a1e5c5217434ac69eed00f40a7f08d3 is 175, key is test_row_0/A:col10/1732123415698/Put/seqid=0
2024-11-20T17:23:37,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742332_1508 (size=30955)
2024-11-20T17:23:37,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155
2024-11-20T17:23:37,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 47d6ffc4e6051fed3b2d9d69f1d3f3c0
2024-11-20T17:23:37,818 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing
2024-11-20T17:23:37,825 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:37,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123477823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:37,826 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:37,826 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:37,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46396 deadline: 1732123477823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:37,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123477824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:37,826 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:37,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123477824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:37,826 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:37,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123477825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:37,861 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=42, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/5a1e5c5217434ac69eed00f40a7f08d3 2024-11-20T17:23:37,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/816e9da2e3214e5faf707bf573ff575c is 50, key is test_row_0/B:col10/1732123415698/Put/seqid=0 2024-11-20T17:23:37,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742333_1509 (size=12001) 2024-11-20T17:23:37,927 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:37,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123477926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:37,927 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:37,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123477927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:37,928 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:37,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123477927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:37,928 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:37,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123477927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:38,129 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:38,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123478129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:38,130 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:38,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123478129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:38,130 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:38,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123478129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:38,131 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:38,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123478129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:38,275 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/816e9da2e3214e5faf707bf573ff575c 2024-11-20T17:23:38,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/81cbf2fc318847328c8186a68bdb115a is 50, key is test_row_0/C:col10/1732123415698/Put/seqid=0 2024-11-20T17:23:38,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742334_1510 (size=12001) 2024-11-20T17:23:38,433 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:38,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123478431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:38,434 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:38,435 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:38,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123478432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:38,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123478433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:38,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:38,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123478433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:38,685 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/81cbf2fc318847328c8186a68bdb115a 2024-11-20T17:23:38,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/5a1e5c5217434ac69eed00f40a7f08d3 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/5a1e5c5217434ac69eed00f40a7f08d3 2024-11-20T17:23:38,692 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/5a1e5c5217434ac69eed00f40a7f08d3, entries=150, sequenceid=42, filesize=30.2 K 2024-11-20T17:23:38,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/816e9da2e3214e5faf707bf573ff575c as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/816e9da2e3214e5faf707bf573ff575c 2024-11-20T17:23:38,697 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/816e9da2e3214e5faf707bf573ff575c, entries=150, sequenceid=42, filesize=11.7 K 2024-11-20T17:23:38,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/81cbf2fc318847328c8186a68bdb115a as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/81cbf2fc318847328c8186a68bdb115a 2024-11-20T17:23:38,701 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/81cbf2fc318847328c8186a68bdb115a, entries=150, sequenceid=42, filesize=11.7 K 2024-11-20T17:23:38,702 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 47d6ffc4e6051fed3b2d9d69f1d3f3c0 in 1660ms, sequenceid=42, compaction requested=false 2024-11-20T17:23:38,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2538): Flush status journal for 47d6ffc4e6051fed3b2d9d69f1d3f3c0: 2024-11-20T17:23:38,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:38,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=156 2024-11-20T17:23:38,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=156 2024-11-20T17:23:38,704 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=155 2024-11-20T17:23:38,704 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=155, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.0360 sec 2024-11-20T17:23:38,705 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees in 3.0390 sec 2024-11-20T17:23:38,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:38,937 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 47d6ffc4e6051fed3b2d9d69f1d3f3c0 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T17:23:38,937 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=A 2024-11-20T17:23:38,937 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:38,937 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=B 2024-11-20T17:23:38,937 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:38,937 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=C 2024-11-20T17:23:38,937 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:38,943 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112013267438c1b64346b6647ef407d569c8_47d6ffc4e6051fed3b2d9d69f1d3f3c0 is 50, key is test_row_0/A:col10/1732123417822/Put/seqid=0 2024-11-20T17:23:38,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742335_1511 (size=12154) 2024-11-20T17:23:38,959 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:38,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123478957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:38,961 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:38,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123478958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:38,961 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:38,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123478959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:38,961 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:38,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123478960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:39,061 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:39,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123479060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:39,064 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:39,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123479061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:39,064 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:39,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123479062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:39,064 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:39,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123479062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:39,264 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:39,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123479263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:39,266 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:39,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123479265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:39,267 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:39,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123479265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:39,267 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:39,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123479266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:39,347 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:23:39,351 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112013267438c1b64346b6647ef407d569c8_47d6ffc4e6051fed3b2d9d69f1d3f3c0 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112013267438c1b64346b6647ef407d569c8_47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:39,352 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/52e442ae5bab4106a71ce083e36c1cac, store: [table=TestAcidGuarantees family=A region=47d6ffc4e6051fed3b2d9d69f1d3f3c0] 2024-11-20T17:23:39,352 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/52e442ae5bab4106a71ce083e36c1cac is 175, key is test_row_0/A:col10/1732123417822/Put/seqid=0 2024-11-20T17:23:39,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742336_1512 (size=30955) 2024-11-20T17:23:39,567 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:39,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123479565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:39,569 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:39,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123479568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:39,570 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:39,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123479568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:39,571 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:39,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123479568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:39,762 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=54, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/52e442ae5bab4106a71ce083e36c1cac 2024-11-20T17:23:39,769 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/988b64e811694a7b9b349fb0e2ec575b is 50, key is test_row_0/B:col10/1732123417822/Put/seqid=0 2024-11-20T17:23:39,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-20T17:23:39,771 INFO [Thread-2250 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 155 completed 2024-11-20T17:23:39,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742337_1513 (size=12001) 2024-11-20T17:23:39,782 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:23:39,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=157, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees 2024-11-20T17:23:39,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-20T17:23:39,783 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=157, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:23:39,784 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=157, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:23:39,784 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
2024-11-20T17:23:39,828 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:39,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46396 deadline: 1732123479827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:39,828 DEBUG [Thread-2248 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4128 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0., hostname=d514dc944523,40121,1732123262111, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at 
org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at 
org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:23:39,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-20T17:23:39,935 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:39,936 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-20T17:23:39,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:39,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:39,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:39,936 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:39,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:39,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:40,073 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:40,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123480071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:40,073 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:40,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123480071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:40,073 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:40,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123480072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:40,076 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:40,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123480073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:40,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-20T17:23:40,088 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:40,088 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-20T17:23:40,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:40,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:40,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:40,089 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:40,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:40,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:40,183 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/988b64e811694a7b9b349fb0e2ec575b 2024-11-20T17:23:40,189 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/380ef7e1c29d49e9b5600c6ccd31e5ca is 50, key is test_row_0/C:col10/1732123417822/Put/seqid=0 2024-11-20T17:23:40,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742338_1514 (size=12001) 2024-11-20T17:23:40,240 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:40,241 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-20T17:23:40,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:40,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:40,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:40,241 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:40,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:40,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:40,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-20T17:23:40,393 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:40,394 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-20T17:23:40,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:40,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:40,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:40,394 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:40,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:40,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:40,546 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:40,546 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-20T17:23:40,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:40,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:40,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:40,547 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:40,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:40,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:40,593 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/380ef7e1c29d49e9b5600c6ccd31e5ca 2024-11-20T17:23:40,597 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/52e442ae5bab4106a71ce083e36c1cac as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/52e442ae5bab4106a71ce083e36c1cac 2024-11-20T17:23:40,600 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/52e442ae5bab4106a71ce083e36c1cac, entries=150, sequenceid=54, filesize=30.2 K 2024-11-20T17:23:40,601 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/988b64e811694a7b9b349fb0e2ec575b as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/988b64e811694a7b9b349fb0e2ec575b 2024-11-20T17:23:40,604 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/988b64e811694a7b9b349fb0e2ec575b, entries=150, sequenceid=54, 
filesize=11.7 K 2024-11-20T17:23:40,605 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/380ef7e1c29d49e9b5600c6ccd31e5ca as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/380ef7e1c29d49e9b5600c6ccd31e5ca 2024-11-20T17:23:40,608 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/380ef7e1c29d49e9b5600c6ccd31e5ca, entries=150, sequenceid=54, filesize=11.7 K 2024-11-20T17:23:40,609 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 47d6ffc4e6051fed3b2d9d69f1d3f3c0 in 1672ms, sequenceid=54, compaction requested=true 2024-11-20T17:23:40,609 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 47d6ffc4e6051fed3b2d9d69f1d3f3c0: 2024-11-20T17:23:40,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 47d6ffc4e6051fed3b2d9d69f1d3f3c0:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:23:40,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:40,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 47d6ffc4e6051fed3b2d9d69f1d3f3c0:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:23:40,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:40,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 47d6ffc4e6051fed3b2d9d69f1d3f3c0:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:23:40,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:23:40,609 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:23:40,609 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:23:40,610 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:23:40,610 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92865 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:23:40,610 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 
47d6ffc4e6051fed3b2d9d69f1d3f3c0/B is initiating minor compaction (all files) 2024-11-20T17:23:40,610 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): 47d6ffc4e6051fed3b2d9d69f1d3f3c0/A is initiating minor compaction (all files) 2024-11-20T17:23:40,610 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 47d6ffc4e6051fed3b2d9d69f1d3f3c0/A in TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:40,610 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 47d6ffc4e6051fed3b2d9d69f1d3f3c0/B in TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:40,610 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/6df64848a39149febbfae78ade10dafc, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/5a1e5c5217434ac69eed00f40a7f08d3, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/52e442ae5bab4106a71ce083e36c1cac] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp, totalSize=90.7 K 2024-11-20T17:23:40,610 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/09ed176a672a44e5b73b615605ae3f55, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/816e9da2e3214e5faf707bf573ff575c, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/988b64e811694a7b9b349fb0e2ec575b] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp, totalSize=35.2 K 2024-11-20T17:23:40,610 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:40,610 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 
files: [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/6df64848a39149febbfae78ade10dafc, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/5a1e5c5217434ac69eed00f40a7f08d3, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/52e442ae5bab4106a71ce083e36c1cac] 2024-11-20T17:23:40,610 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 09ed176a672a44e5b73b615605ae3f55, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732123415680 2024-11-20T17:23:40,611 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6df64848a39149febbfae78ade10dafc, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732123415680 2024-11-20T17:23:40,611 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 816e9da2e3214e5faf707bf573ff575c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732123415697 2024-11-20T17:23:40,611 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5a1e5c5217434ac69eed00f40a7f08d3, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732123415697 2024-11-20T17:23:40,611 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 988b64e811694a7b9b349fb0e2ec575b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732123417822 2024-11-20T17:23:40,611 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 52e442ae5bab4106a71ce083e36c1cac, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732123417822 2024-11-20T17:23:40,617 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=47d6ffc4e6051fed3b2d9d69f1d3f3c0] 2024-11-20T17:23:40,618 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 47d6ffc4e6051fed3b2d9d69f1d3f3c0#B#compaction#439 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:23:40,618 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/ee4a3da720134223b70effea8b33cc83 is 50, key is test_row_0/B:col10/1732123417822/Put/seqid=0 2024-11-20T17:23:40,619 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411209bc18010f956496994c57a193b703dd9_47d6ffc4e6051fed3b2d9d69f1d3f3c0 store=[table=TestAcidGuarantees family=A region=47d6ffc4e6051fed3b2d9d69f1d3f3c0] 2024-11-20T17:23:40,621 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411209bc18010f956496994c57a193b703dd9_47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=[table=TestAcidGuarantees family=A region=47d6ffc4e6051fed3b2d9d69f1d3f3c0] 2024-11-20T17:23:40,621 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209bc18010f956496994c57a193b703dd9_47d6ffc4e6051fed3b2d9d69f1d3f3c0 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=47d6ffc4e6051fed3b2d9d69f1d3f3c0] 2024-11-20T17:23:40,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742339_1515 (size=12104) 2024-11-20T17:23:40,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742340_1516 (size=4469) 2024-11-20T17:23:40,629 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/ee4a3da720134223b70effea8b33cc83 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/ee4a3da720134223b70effea8b33cc83 2024-11-20T17:23:40,633 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 47d6ffc4e6051fed3b2d9d69f1d3f3c0/B of 47d6ffc4e6051fed3b2d9d69f1d3f3c0 into ee4a3da720134223b70effea8b33cc83(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:23:40,634 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 47d6ffc4e6051fed3b2d9d69f1d3f3c0: 2024-11-20T17:23:40,634 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0., storeName=47d6ffc4e6051fed3b2d9d69f1d3f3c0/B, priority=13, startTime=1732123420609; duration=0sec 2024-11-20T17:23:40,634 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:23:40,634 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 47d6ffc4e6051fed3b2d9d69f1d3f3c0:B 2024-11-20T17:23:40,634 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:23:40,635 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:23:40,635 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 47d6ffc4e6051fed3b2d9d69f1d3f3c0/C is initiating minor compaction (all files) 2024-11-20T17:23:40,635 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 47d6ffc4e6051fed3b2d9d69f1d3f3c0/C in TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:40,635 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/681ca112de9445d0adc087a2df6910a1, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/81cbf2fc318847328c8186a68bdb115a, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/380ef7e1c29d49e9b5600c6ccd31e5ca] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp, totalSize=35.2 K 2024-11-20T17:23:40,635 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 681ca112de9445d0adc087a2df6910a1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732123415680 2024-11-20T17:23:40,636 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 81cbf2fc318847328c8186a68bdb115a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732123415697 2024-11-20T17:23:40,636 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 380ef7e1c29d49e9b5600c6ccd31e5ca, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732123417822 2024-11-20T17:23:40,643 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
47d6ffc4e6051fed3b2d9d69f1d3f3c0#C#compaction#441 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:23:40,643 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/9ba5e66b9e8048e7b028bf587659e2d5 is 50, key is test_row_0/C:col10/1732123417822/Put/seqid=0 2024-11-20T17:23:40,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742341_1517 (size=12104) 2024-11-20T17:23:40,650 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/9ba5e66b9e8048e7b028bf587659e2d5 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/9ba5e66b9e8048e7b028bf587659e2d5 2024-11-20T17:23:40,655 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 47d6ffc4e6051fed3b2d9d69f1d3f3c0/C of 47d6ffc4e6051fed3b2d9d69f1d3f3c0 into 9ba5e66b9e8048e7b028bf587659e2d5(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:23:40,655 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 47d6ffc4e6051fed3b2d9d69f1d3f3c0: 2024-11-20T17:23:40,655 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0., storeName=47d6ffc4e6051fed3b2d9d69f1d3f3c0/C, priority=13, startTime=1732123420609; duration=0sec 2024-11-20T17:23:40,655 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:40,655 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 47d6ffc4e6051fed3b2d9d69f1d3f3c0:C 2024-11-20T17:23:40,698 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:40,699 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-20T17:23:40,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 
2024-11-20T17:23:40,699 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2837): Flushing 47d6ffc4e6051fed3b2d9d69f1d3f3c0 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T17:23:40,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=A 2024-11-20T17:23:40,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:40,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=B 2024-11-20T17:23:40,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:40,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=C 2024-11-20T17:23:40,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:40,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112088c59f300a2f48928cd41ce51f32cbcb_47d6ffc4e6051fed3b2d9d69f1d3f3c0 is 50, key is test_row_0/A:col10/1732123418953/Put/seqid=0 2024-11-20T17:23:40,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742342_1518 (size=12154) 2024-11-20T17:23:40,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:23:40,715 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112088c59f300a2f48928cd41ce51f32cbcb_47d6ffc4e6051fed3b2d9d69f1d3f3c0 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112088c59f300a2f48928cd41ce51f32cbcb_47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:40,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/95a4d80ae0c24fdba33cd701e0464c9a, store: [table=TestAcidGuarantees family=A region=47d6ffc4e6051fed3b2d9d69f1d3f3c0] 2024-11-20T17:23:40,717 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/95a4d80ae0c24fdba33cd701e0464c9a is 175, key is test_row_0/A:col10/1732123418953/Put/seqid=0 2024-11-20T17:23:40,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742343_1519 (size=30955) 2024-11-20T17:23:40,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-20T17:23:41,028 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 47d6ffc4e6051fed3b2d9d69f1d3f3c0#A#compaction#440 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:23:41,029 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/75a185d365164dedab1b052850e72848 is 175, key is test_row_0/A:col10/1732123417822/Put/seqid=0 2024-11-20T17:23:41,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742344_1520 (size=31058) 2024-11-20T17:23:41,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:41,077 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:41,086 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:41,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123481084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:41,086 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:41,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123481084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:41,087 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:41,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123481084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:41,087 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:41,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123481086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:41,121 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=80, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/95a4d80ae0c24fdba33cd701e0464c9a 2024-11-20T17:23:41,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/cdf780078b3d4b32bcfb7f6c1faff16a is 50, key is test_row_0/B:col10/1732123418953/Put/seqid=0 2024-11-20T17:23:41,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742345_1521 (size=12001) 2024-11-20T17:23:41,189 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:41,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123481187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:41,189 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:41,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123481187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:41,189 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:41,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123481187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:41,190 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:41,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123481188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:41,391 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:41,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123481390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:41,392 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:41,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123481391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:41,392 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:41,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123481391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:41,393 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:41,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123481392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:41,437 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/75a185d365164dedab1b052850e72848 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/75a185d365164dedab1b052850e72848 2024-11-20T17:23:41,441 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 47d6ffc4e6051fed3b2d9d69f1d3f3c0/A of 47d6ffc4e6051fed3b2d9d69f1d3f3c0 into 75a185d365164dedab1b052850e72848(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:23:41,441 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 47d6ffc4e6051fed3b2d9d69f1d3f3c0: 2024-11-20T17:23:41,441 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0., storeName=47d6ffc4e6051fed3b2d9d69f1d3f3c0/A, priority=13, startTime=1732123420609; duration=0sec 2024-11-20T17:23:41,441 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:41,441 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 47d6ffc4e6051fed3b2d9d69f1d3f3c0:A 2024-11-20T17:23:41,532 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/cdf780078b3d4b32bcfb7f6c1faff16a 2024-11-20T17:23:41,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/72b02bfd8e7b46c595077a218a13eb86 is 50, key is test_row_0/C:col10/1732123418953/Put/seqid=0 2024-11-20T17:23:41,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742346_1522 (size=12001) 2024-11-20T17:23:41,693 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:41,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123481692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:41,695 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:41,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123481694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:41,697 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:41,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123481695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:41,697 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:41,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123481695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:41,873 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-20T17:23:41,873 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-20T17:23:41,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-20T17:23:41,942 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/72b02bfd8e7b46c595077a218a13eb86 2024-11-20T17:23:41,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/95a4d80ae0c24fdba33cd701e0464c9a as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/95a4d80ae0c24fdba33cd701e0464c9a 2024-11-20T17:23:41,949 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/95a4d80ae0c24fdba33cd701e0464c9a, entries=150, sequenceid=80, filesize=30.2 K 2024-11-20T17:23:41,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/cdf780078b3d4b32bcfb7f6c1faff16a as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/cdf780078b3d4b32bcfb7f6c1faff16a 2024-11-20T17:23:41,952 INFO 
[RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/cdf780078b3d4b32bcfb7f6c1faff16a, entries=150, sequenceid=80, filesize=11.7 K 2024-11-20T17:23:41,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/72b02bfd8e7b46c595077a218a13eb86 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/72b02bfd8e7b46c595077a218a13eb86 2024-11-20T17:23:41,956 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/72b02bfd8e7b46c595077a218a13eb86, entries=150, sequenceid=80, filesize=11.7 K 2024-11-20T17:23:41,957 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 47d6ffc4e6051fed3b2d9d69f1d3f3c0 in 1258ms, sequenceid=80, compaction requested=false 2024-11-20T17:23:41,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2538): Flush status journal for 47d6ffc4e6051fed3b2d9d69f1d3f3c0: 2024-11-20T17:23:41,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 
2024-11-20T17:23:41,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=158 2024-11-20T17:23:41,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=158 2024-11-20T17:23:41,959 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=158, resume processing ppid=157 2024-11-20T17:23:41,959 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, ppid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1740 sec 2024-11-20T17:23:41,961 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees in 2.1780 sec 2024-11-20T17:23:42,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:42,197 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 47d6ffc4e6051fed3b2d9d69f1d3f3c0 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T17:23:42,197 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=A 2024-11-20T17:23:42,198 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:42,198 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=B 2024-11-20T17:23:42,198 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:42,198 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=C 2024-11-20T17:23:42,198 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:42,204 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112049fb0261b05044e6adf46842eda72ecd_47d6ffc4e6051fed3b2d9d69f1d3f3c0 is 50, key is test_row_0/A:col10/1732123421081/Put/seqid=0 2024-11-20T17:23:42,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742347_1523 (size=12154) 2024-11-20T17:23:42,217 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:42,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123482215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:42,218 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:42,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123482216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:42,219 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:42,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123482216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:42,219 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:42,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123482217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:42,319 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:42,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123482318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:42,321 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:42,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123482319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:42,322 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:42,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123482320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:42,322 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:42,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123482320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:42,523 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:42,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123482521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:42,524 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:42,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123482522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:42,525 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:42,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123482523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:42,525 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:42,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123482524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:42,612 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:23:42,615 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112049fb0261b05044e6adf46842eda72ecd_47d6ffc4e6051fed3b2d9d69f1d3f3c0 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112049fb0261b05044e6adf46842eda72ecd_47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:42,616 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/2e0383138d194a3891a8a3d26ea30702, store: [table=TestAcidGuarantees family=A region=47d6ffc4e6051fed3b2d9d69f1d3f3c0] 2024-11-20T17:23:42,617 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/2e0383138d194a3891a8a3d26ea30702 is 175, key is test_row_0/A:col10/1732123421081/Put/seqid=0 2024-11-20T17:23:42,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742348_1524 (size=30955) 2024-11-20T17:23:42,826 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:42,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123482825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:42,828 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:42,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123482826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:42,829 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:42,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123482828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:42,830 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:42,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123482828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:43,038 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=94, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/2e0383138d194a3891a8a3d26ea30702 2024-11-20T17:23:43,044 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/c24ed9367d9446c9833f561eec3e3a3f is 50, key is test_row_0/B:col10/1732123421081/Put/seqid=0 2024-11-20T17:23:43,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742349_1525 (size=12001) 2024-11-20T17:23:43,328 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:43,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123483327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:43,333 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:43,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123483332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:43,335 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:43,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123483333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:43,335 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:43,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123483334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:43,448 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/c24ed9367d9446c9833f561eec3e3a3f 2024-11-20T17:23:43,454 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/edfebd059add4e5db9253011638bb8d0 is 50, key is test_row_0/C:col10/1732123421081/Put/seqid=0 2024-11-20T17:23:43,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742350_1526 (size=12001) 2024-11-20T17:23:43,859 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/edfebd059add4e5db9253011638bb8d0 2024-11-20T17:23:43,861 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:43,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46396 deadline: 1732123483860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:43,862 DEBUG [Thread-2248 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8162 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0., hostname=d514dc944523,40121,1732123262111, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:23:43,863 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/2e0383138d194a3891a8a3d26ea30702 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/2e0383138d194a3891a8a3d26ea30702 2024-11-20T17:23:43,867 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/2e0383138d194a3891a8a3d26ea30702, entries=150, sequenceid=94, filesize=30.2 K 2024-11-20T17:23:43,868 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/c24ed9367d9446c9833f561eec3e3a3f as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/c24ed9367d9446c9833f561eec3e3a3f 2024-11-20T17:23:43,871 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/c24ed9367d9446c9833f561eec3e3a3f, entries=150, sequenceid=94, filesize=11.7 K 2024-11-20T17:23:43,871 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/edfebd059add4e5db9253011638bb8d0 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/edfebd059add4e5db9253011638bb8d0 2024-11-20T17:23:43,875 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/edfebd059add4e5db9253011638bb8d0, entries=150, sequenceid=94, filesize=11.7 K 2024-11-20T17:23:43,875 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 47d6ffc4e6051fed3b2d9d69f1d3f3c0 in 1678ms, sequenceid=94, compaction requested=true 2024-11-20T17:23:43,875 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 47d6ffc4e6051fed3b2d9d69f1d3f3c0: 2024-11-20T17:23:43,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 47d6ffc4e6051fed3b2d9d69f1d3f3c0:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:23:43,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:43,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 47d6ffc4e6051fed3b2d9d69f1d3f3c0:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:23:43,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:43,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 47d6ffc4e6051fed3b2d9d69f1d3f3c0:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:23:43,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 
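
The RpcRetryingCallerImpl entry above ("tries=7, retries=16, started=8162 ms ago") is the client side of the picture: the writer thread keeps backing off and retrying while the region rejects mutations with RegionTooBusyException. The following is a minimal client-side sketch of the knobs involved, assuming the standard HBase 2.x client API; only "retries=16" is taken from this run, while the pause and the cell value are illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetryingPutSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            conf.setInt("hbase.client.retries.number", 16); // matches "retries=16" above
            conf.setLong("hbase.client.pause", 100);        // base back-off in ms (illustrative)
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                // A RegionTooBusyException returned by the server is retried internally
                // until the retry budget configured above is exhausted.
                table.put(put);
            }
        }
    }

On the server side, HRegion.checkResources, visible at the top of every stack trace in this log, is what raises RegionTooBusyException once the region's memstore passes its blocking limit (512.0 K in this run).
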
2024-11-20T17:23:43,876 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:23:43,876 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:23:43,877 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:23:43,877 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 47d6ffc4e6051fed3b2d9d69f1d3f3c0/B is initiating minor compaction (all files) 2024-11-20T17:23:43,877 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 47d6ffc4e6051fed3b2d9d69f1d3f3c0/B in TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:43,877 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/ee4a3da720134223b70effea8b33cc83, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/cdf780078b3d4b32bcfb7f6c1faff16a, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/c24ed9367d9446c9833f561eec3e3a3f] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp, totalSize=35.3 K 2024-11-20T17:23:43,877 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92968 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:23:43,877 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): 47d6ffc4e6051fed3b2d9d69f1d3f3c0/A is initiating minor compaction (all files) 2024-11-20T17:23:43,877 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 47d6ffc4e6051fed3b2d9d69f1d3f3c0/A in TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 
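
The SortedCompactionPolicy/ExploringCompactionPolicy lines above show the selector examining 3 eligible files per store against a 16-file blocking threshold. The sketch below only names the standard server-side settings that drive that selection; the values shown are illustrative, and in a mini-cluster test such as this one they would have to be applied to the Configuration before the region server starts (an assumption, not something visible in this log).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Minimum and maximum number of store files bundled into one minor compaction.
            conf.setInt("hbase.hstore.compaction.min", 3);
            conf.setInt("hbase.hstore.compaction.max", 10);
            // Ratio used when scoring candidate file permutations.
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
            // "16 blocking" above: updates are delayed once a store holds this many files.
            conf.setInt("hbase.hstore.blockingStoreFiles", 16);
            System.out.println("blockingStoreFiles=" + conf.get("hbase.hstore.blockingStoreFiles"));
        }
    }
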
2024-11-20T17:23:43,877 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/75a185d365164dedab1b052850e72848, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/95a4d80ae0c24fdba33cd701e0464c9a, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/2e0383138d194a3891a8a3d26ea30702] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp, totalSize=90.8 K 2024-11-20T17:23:43,877 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:43,877 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. files: [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/75a185d365164dedab1b052850e72848, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/95a4d80ae0c24fdba33cd701e0464c9a, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/2e0383138d194a3891a8a3d26ea30702] 2024-11-20T17:23:43,877 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting ee4a3da720134223b70effea8b33cc83, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732123417822 2024-11-20T17:23:43,877 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 75a185d365164dedab1b052850e72848, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732123417822 2024-11-20T17:23:43,878 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting cdf780078b3d4b32bcfb7f6c1faff16a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732123418953 2024-11-20T17:23:43,878 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 95a4d80ae0c24fdba33cd701e0464c9a, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732123418953 2024-11-20T17:23:43,878 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting c24ed9367d9446c9833f561eec3e3a3f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732123421081 2024-11-20T17:23:43,878 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2e0383138d194a3891a8a3d26ea30702, keycount=150, bloomtype=ROW, size=30.2 K, 
encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732123421081 2024-11-20T17:23:43,885 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=47d6ffc4e6051fed3b2d9d69f1d3f3c0] 2024-11-20T17:23:43,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-20T17:23:43,888 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 47d6ffc4e6051fed3b2d9d69f1d3f3c0#B#compaction#449 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:23:43,888 INFO [Thread-2250 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 157 completed 2024-11-20T17:23:43,888 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/c87d873960004ade917c47ef80997e40 is 50, key is test_row_0/B:col10/1732123421081/Put/seqid=0 2024-11-20T17:23:43,890 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120454ab95f62394f74b9b16cb8334586e7_47d6ffc4e6051fed3b2d9d69f1d3f3c0 store=[table=TestAcidGuarantees family=A region=47d6ffc4e6051fed3b2d9d69f1d3f3c0] 2024-11-20T17:23:43,890 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:23:43,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees 2024-11-20T17:23:43,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-20T17:23:43,891 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:23:43,892 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120454ab95f62394f74b9b16cb8334586e7_47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=[table=TestAcidGuarantees family=A region=47d6ffc4e6051fed3b2d9d69f1d3f3c0] 2024-11-20T17:23:43,892 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120454ab95f62394f74b9b16cb8334586e7_47d6ffc4e6051fed3b2d9d69f1d3f3c0 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=47d6ffc4e6051fed3b2d9d69f1d3f3c0] 2024-11-20T17:23:43,892 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, 
locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:23:43,892 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:23:43,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742351_1527 (size=12207) 2024-11-20T17:23:43,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742352_1528 (size=4469) 2024-11-20T17:23:43,904 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 47d6ffc4e6051fed3b2d9d69f1d3f3c0#A#compaction#448 average throughput is 1.29 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:23:43,905 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/b974ab530aee4ba5bbdbf7fde73c49d1 is 175, key is test_row_0/A:col10/1732123421081/Put/seqid=0 2024-11-20T17:23:43,907 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/c87d873960004ade917c47ef80997e40 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/c87d873960004ade917c47ef80997e40 2024-11-20T17:23:43,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742353_1529 (size=31161) 2024-11-20T17:23:43,912 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 47d6ffc4e6051fed3b2d9d69f1d3f3c0/B of 47d6ffc4e6051fed3b2d9d69f1d3f3c0 into c87d873960004ade917c47ef80997e40(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
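
The FlushTableProcedure/FlushRegionProcedure entries above (pid=159 with subprocedure pid=160) are the master-side half of a client-requested table flush, and the earlier "Operation: FLUSH ... procId: 157 completed" line is the client future for the previous request. A minimal sketch of the kind of Admin call that issues such a flush, assuming the standard HBase 2.x client API:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Asks the master to flush every region of the table; the master runs it as
                // a FlushTableProcedure with one FlushRegionProcedure per region, and the
                // call returns once the procedure reports completion (compare the
                // "procId: 157 completed" line above).
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }
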
2024-11-20T17:23:43,912 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 47d6ffc4e6051fed3b2d9d69f1d3f3c0: 2024-11-20T17:23:43,912 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0., storeName=47d6ffc4e6051fed3b2d9d69f1d3f3c0/B, priority=13, startTime=1732123423875; duration=0sec 2024-11-20T17:23:43,912 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:23:43,912 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 47d6ffc4e6051fed3b2d9d69f1d3f3c0:B 2024-11-20T17:23:43,912 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:23:43,913 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:23:43,913 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 47d6ffc4e6051fed3b2d9d69f1d3f3c0/C is initiating minor compaction (all files) 2024-11-20T17:23:43,913 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 47d6ffc4e6051fed3b2d9d69f1d3f3c0/C in TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:43,913 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/9ba5e66b9e8048e7b028bf587659e2d5, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/72b02bfd8e7b46c595077a218a13eb86, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/edfebd059add4e5db9253011638bb8d0] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp, totalSize=35.3 K 2024-11-20T17:23:43,914 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 9ba5e66b9e8048e7b028bf587659e2d5, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732123417822 2024-11-20T17:23:43,914 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 72b02bfd8e7b46c595077a218a13eb86, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732123418953 2024-11-20T17:23:43,914 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting edfebd059add4e5db9253011638bb8d0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732123421081 2024-11-20T17:23:43,923 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
47d6ffc4e6051fed3b2d9d69f1d3f3c0#C#compaction#450 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:23:43,924 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/4fbf1faa800b4c5b96da6788c0641135 is 50, key is test_row_0/C:col10/1732123421081/Put/seqid=0 2024-11-20T17:23:43,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742354_1530 (size=12207) 2024-11-20T17:23:43,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-20T17:23:44,044 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:44,044 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-20T17:23:44,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:44,045 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2837): Flushing 47d6ffc4e6051fed3b2d9d69f1d3f3c0 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T17:23:44,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=A 2024-11-20T17:23:44,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:44,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=B 2024-11-20T17:23:44,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:44,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=C 2024-11-20T17:23:44,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:44,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411206ae9738c05de4e03913c2f1c8eef821f_47d6ffc4e6051fed3b2d9d69f1d3f3c0 is 50, key is test_row_0/A:col10/1732123422215/Put/seqid=0 2024-11-20T17:23:44,055 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742355_1531 (size=12154) 2024-11-20T17:23:44,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:23:44,059 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411206ae9738c05de4e03913c2f1c8eef821f_47d6ffc4e6051fed3b2d9d69f1d3f3c0 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411206ae9738c05de4e03913c2f1c8eef821f_47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:44,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/274753a6d45b4fd785d8ccd0a3bf04af, store: [table=TestAcidGuarantees family=A region=47d6ffc4e6051fed3b2d9d69f1d3f3c0] 2024-11-20T17:23:44,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/274753a6d45b4fd785d8ccd0a3bf04af is 175, key is test_row_0/A:col10/1732123422215/Put/seqid=0 2024-11-20T17:23:44,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742356_1532 (size=30955) 2024-11-20T17:23:44,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-20T17:23:44,314 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/b974ab530aee4ba5bbdbf7fde73c49d1 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/b974ab530aee4ba5bbdbf7fde73c49d1 2024-11-20T17:23:44,317 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 47d6ffc4e6051fed3b2d9d69f1d3f3c0/A of 47d6ffc4e6051fed3b2d9d69f1d3f3c0 into b974ab530aee4ba5bbdbf7fde73c49d1(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
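
The HMobStore/DefaultMobStoreFlusher entries above show family A's flush writing its MOB data under the mobdir tree instead of the regular store directory. Below is a minimal sketch of how a family is declared MOB-enabled so that those code paths apply, assuming the standard HBase 2.x descriptor API; the 100 KB threshold is an illustrative value, not taken from this test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilySketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Cells larger than the MOB threshold are written to the mobdir tree seen
                // above rather than to the family's regular HFiles.
                ColumnFamilyDescriptor familyA = ColumnFamilyDescriptorBuilder
                        .newBuilder(Bytes.toBytes("A"))
                        .setMobEnabled(true)
                        .setMobThreshold(100 * 1024L)   // illustrative 100 KB threshold
                        .build();
                admin.createTable(TableDescriptorBuilder
                        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                        .setColumnFamily(familyA)
                        .build());
            }
        }
    }
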
2024-11-20T17:23:44,317 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 47d6ffc4e6051fed3b2d9d69f1d3f3c0: 2024-11-20T17:23:44,317 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0., storeName=47d6ffc4e6051fed3b2d9d69f1d3f3c0/A, priority=13, startTime=1732123423875; duration=0sec 2024-11-20T17:23:44,317 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:44,317 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 47d6ffc4e6051fed3b2d9d69f1d3f3c0:A 2024-11-20T17:23:44,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:44,333 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:44,344 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:44,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123484343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:44,345 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:44,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123484343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:44,346 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:44,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123484344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:44,346 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:44,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123484344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:44,350 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/4fbf1faa800b4c5b96da6788c0641135 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/4fbf1faa800b4c5b96da6788c0641135 2024-11-20T17:23:44,353 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 47d6ffc4e6051fed3b2d9d69f1d3f3c0/C of 47d6ffc4e6051fed3b2d9d69f1d3f3c0 into 4fbf1faa800b4c5b96da6788c0641135(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:23:44,353 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 47d6ffc4e6051fed3b2d9d69f1d3f3c0: 2024-11-20T17:23:44,353 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0., storeName=47d6ffc4e6051fed3b2d9d69f1d3f3c0/C, priority=13, startTime=1732123423875; duration=0sec 2024-11-20T17:23:44,353 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:44,353 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 47d6ffc4e6051fed3b2d9d69f1d3f3c0:C 2024-11-20T17:23:44,446 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:44,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123484446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:44,447 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:44,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123484446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:44,448 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:44,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123484447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:44,449 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:44,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123484447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:44,466 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=118, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/274753a6d45b4fd785d8ccd0a3bf04af 2024-11-20T17:23:44,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/3bfba0ac6f754c3c9153c5260240b226 is 50, key is test_row_0/B:col10/1732123422215/Put/seqid=0 2024-11-20T17:23:44,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742357_1533 (size=12001) 2024-11-20T17:23:44,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-20T17:23:44,648 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:44,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123484648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:44,650 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:44,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123484649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:44,650 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:44,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123484649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:44,651 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:44,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123484650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:44,878 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/3bfba0ac6f754c3c9153c5260240b226 2024-11-20T17:23:44,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/ba6f95a5ef654efe95f885c7e100e2cf is 50, key is test_row_0/C:col10/1732123422215/Put/seqid=0 2024-11-20T17:23:44,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742358_1534 (size=12001) 2024-11-20T17:23:44,951 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:44,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123484950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:44,953 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:44,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123484952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:44,953 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:44,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123484952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:44,954 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:44,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123484954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:44,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-20T17:23:45,288 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/ba6f95a5ef654efe95f885c7e100e2cf 2024-11-20T17:23:45,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/274753a6d45b4fd785d8ccd0a3bf04af as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/274753a6d45b4fd785d8ccd0a3bf04af 2024-11-20T17:23:45,295 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/274753a6d45b4fd785d8ccd0a3bf04af, entries=150, sequenceid=118, filesize=30.2 K 2024-11-20T17:23:45,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/3bfba0ac6f754c3c9153c5260240b226 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/3bfba0ac6f754c3c9153c5260240b226 2024-11-20T17:23:45,299 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/3bfba0ac6f754c3c9153c5260240b226, entries=150, sequenceid=118, filesize=11.7 K 2024-11-20T17:23:45,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/ba6f95a5ef654efe95f885c7e100e2cf as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/ba6f95a5ef654efe95f885c7e100e2cf 2024-11-20T17:23:45,303 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/ba6f95a5ef654efe95f885c7e100e2cf, entries=150, sequenceid=118, filesize=11.7 K 2024-11-20T17:23:45,303 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 47d6ffc4e6051fed3b2d9d69f1d3f3c0 in 1258ms, sequenceid=118, compaction requested=false 2024-11-20T17:23:45,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2538): Flush status journal for 47d6ffc4e6051fed3b2d9d69f1d3f3c0: 2024-11-20T17:23:45,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:45,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=160 2024-11-20T17:23:45,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=160 2024-11-20T17:23:45,306 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=159 2024-11-20T17:23:45,306 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4130 sec 2024-11-20T17:23:45,308 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees in 1.4170 sec 2024-11-20T17:23:45,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:45,456 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 47d6ffc4e6051fed3b2d9d69f1d3f3c0 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T17:23:45,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=A 2024-11-20T17:23:45,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:45,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=B 2024-11-20T17:23:45,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:45,457 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=C 2024-11-20T17:23:45,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:45,463 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411200c8f88ff9174446b8bec9c546b579b10_47d6ffc4e6051fed3b2d9d69f1d3f3c0 is 50, key is test_row_0/A:col10/1732123424343/Put/seqid=0 2024-11-20T17:23:45,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742359_1535 (size=12304) 2024-11-20T17:23:45,478 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:45,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123485476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:45,479 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:45,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123485477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:45,481 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:45,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123485478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:45,482 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:45,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123485479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:45,580 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:45,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123485579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:45,582 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:45,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123485580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:45,584 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:45,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123485582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:45,584 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:45,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123485583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:45,784 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:45,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123485782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:45,787 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:45,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123485784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:45,787 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:45,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123485785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:45,787 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:45,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123485786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:45,867 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:23:45,871 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411200c8f88ff9174446b8bec9c546b579b10_47d6ffc4e6051fed3b2d9d69f1d3f3c0 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200c8f88ff9174446b8bec9c546b579b10_47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:45,871 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/95758fe370e246ceabed231faeda0590, store: [table=TestAcidGuarantees family=A region=47d6ffc4e6051fed3b2d9d69f1d3f3c0] 2024-11-20T17:23:45,872 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/95758fe370e246ceabed231faeda0590 is 175, key is test_row_0/A:col10/1732123424343/Put/seqid=0 2024-11-20T17:23:45,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742360_1536 (size=31105) 2024-11-20T17:23:45,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-20T17:23:45,995 INFO [Thread-2250 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 159 completed 2024-11-20T17:23:45,996 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:23:45,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees 2024-11-20T17:23:45,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-20T17:23:45,998 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:23:45,998 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:23:45,998 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=162, ppid=161, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:23:46,087 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:46,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123486086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:46,090 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:46,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123486089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:46,090 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:46,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123486089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:46,090 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:46,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123486090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:46,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-20T17:23:46,150 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:46,150 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-20T17:23:46,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:46,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:46,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:46,151 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:46,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:46,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:46,276 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=134, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/95758fe370e246ceabed231faeda0590 2024-11-20T17:23:46,282 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/6e2991aaf7f9453d97b833bca0baa904 is 50, key is test_row_0/B:col10/1732123424343/Put/seqid=0 2024-11-20T17:23:46,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742361_1537 (size=12151) 2024-11-20T17:23:46,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-20T17:23:46,303 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:46,303 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-20T17:23:46,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:46,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:46,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:46,303 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:46,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:46,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:46,456 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:46,456 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-20T17:23:46,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:46,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:46,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:46,456 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:46,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:46,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:46,593 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:46,593 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:46,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123486591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:46,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123486591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:46,593 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:46,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123486592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:46,595 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:46,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123486594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:46,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-20T17:23:46,608 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:46,609 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-20T17:23:46,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:46,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:46,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:46,609 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:23:46,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:46,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:46,687 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/6e2991aaf7f9453d97b833bca0baa904 2024-11-20T17:23:46,693 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/98a1309976f2422bb4350ffca766e005 is 50, key is test_row_0/C:col10/1732123424343/Put/seqid=0 2024-11-20T17:23:46,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742362_1538 (size=12151) 2024-11-20T17:23:46,762 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:46,762 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-20T17:23:46,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:46,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:46,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:46,762 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:23:46,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:46,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:46,914 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:46,915 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-20T17:23:46,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:46,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:46,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:46,915 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:46,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:46,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:47,067 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:47,068 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-20T17:23:47,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 
2024-11-20T17:23:47,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:47,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:47,068 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:47,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:23:47,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:23:47,097 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/98a1309976f2422bb4350ffca766e005 2024-11-20T17:23:47,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-20T17:23:47,101 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/95758fe370e246ceabed231faeda0590 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/95758fe370e246ceabed231faeda0590 2024-11-20T17:23:47,104 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/95758fe370e246ceabed231faeda0590, entries=150, sequenceid=134, filesize=30.4 K 2024-11-20T17:23:47,104 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/6e2991aaf7f9453d97b833bca0baa904 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/6e2991aaf7f9453d97b833bca0baa904 2024-11-20T17:23:47,107 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/6e2991aaf7f9453d97b833bca0baa904, entries=150, sequenceid=134, filesize=11.9 K 2024-11-20T17:23:47,108 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/98a1309976f2422bb4350ffca766e005 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/98a1309976f2422bb4350ffca766e005 2024-11-20T17:23:47,110 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/98a1309976f2422bb4350ffca766e005, entries=150, sequenceid=134, filesize=11.9 K 2024-11-20T17:23:47,111 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 47d6ffc4e6051fed3b2d9d69f1d3f3c0 in 1655ms, sequenceid=134, compaction requested=true 2024-11-20T17:23:47,111 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 47d6ffc4e6051fed3b2d9d69f1d3f3c0: 2024-11-20T17:23:47,111 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 47d6ffc4e6051fed3b2d9d69f1d3f3c0:A, priority=-2147483648, current under compaction store 
size is 1 2024-11-20T17:23:47,111 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:47,111 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 47d6ffc4e6051fed3b2d9d69f1d3f3c0:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:23:47,111 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:47,111 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:23:47,111 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 47d6ffc4e6051fed3b2d9d69f1d3f3c0:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:23:47,111 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:23:47,111 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:23:47,112 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93221 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:23:47,112 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:23:47,112 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 47d6ffc4e6051fed3b2d9d69f1d3f3c0/B is initiating minor compaction (all files) 2024-11-20T17:23:47,112 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): 47d6ffc4e6051fed3b2d9d69f1d3f3c0/A is initiating minor compaction (all files) 2024-11-20T17:23:47,113 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 47d6ffc4e6051fed3b2d9d69f1d3f3c0/B in TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:47,113 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 47d6ffc4e6051fed3b2d9d69f1d3f3c0/A in TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 
2024-11-20T17:23:47,113 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/c87d873960004ade917c47ef80997e40, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/3bfba0ac6f754c3c9153c5260240b226, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/6e2991aaf7f9453d97b833bca0baa904] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp, totalSize=35.5 K 2024-11-20T17:23:47,113 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/b974ab530aee4ba5bbdbf7fde73c49d1, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/274753a6d45b4fd785d8ccd0a3bf04af, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/95758fe370e246ceabed231faeda0590] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp, totalSize=91.0 K 2024-11-20T17:23:47,113 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:47,113 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 
files: [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/b974ab530aee4ba5bbdbf7fde73c49d1, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/274753a6d45b4fd785d8ccd0a3bf04af, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/95758fe370e246ceabed231faeda0590] 2024-11-20T17:23:47,113 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting c87d873960004ade917c47ef80997e40, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732123421081 2024-11-20T17:23:47,113 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting b974ab530aee4ba5bbdbf7fde73c49d1, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732123421081 2024-11-20T17:23:47,114 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 3bfba0ac6f754c3c9153c5260240b226, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732123422214 2024-11-20T17:23:47,114 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 274753a6d45b4fd785d8ccd0a3bf04af, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732123422214 2024-11-20T17:23:47,114 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 6e2991aaf7f9453d97b833bca0baa904, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732123424343 2024-11-20T17:23:47,114 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 95758fe370e246ceabed231faeda0590, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732123424343 2024-11-20T17:23:47,119 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=47d6ffc4e6051fed3b2d9d69f1d3f3c0] 2024-11-20T17:23:47,121 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411207a5a227a9fa647eba193f9c7d17f5442_47d6ffc4e6051fed3b2d9d69f1d3f3c0 store=[table=TestAcidGuarantees family=A region=47d6ffc4e6051fed3b2d9d69f1d3f3c0] 2024-11-20T17:23:47,121 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 47d6ffc4e6051fed3b2d9d69f1d3f3c0#B#compaction#458 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:23:47,122 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/a79832a4bf5a4e91b17b60c5e3804c56 is 50, key is test_row_0/B:col10/1732123424343/Put/seqid=0 2024-11-20T17:23:47,123 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411207a5a227a9fa647eba193f9c7d17f5442_47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=[table=TestAcidGuarantees family=A region=47d6ffc4e6051fed3b2d9d69f1d3f3c0] 2024-11-20T17:23:47,124 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207a5a227a9fa647eba193f9c7d17f5442_47d6ffc4e6051fed3b2d9d69f1d3f3c0 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=47d6ffc4e6051fed3b2d9d69f1d3f3c0] 2024-11-20T17:23:47,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742363_1539 (size=12459) 2024-11-20T17:23:47,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742364_1540 (size=4469) 2024-11-20T17:23:47,140 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/a79832a4bf5a4e91b17b60c5e3804c56 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/a79832a4bf5a4e91b17b60c5e3804c56 2024-11-20T17:23:47,144 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 47d6ffc4e6051fed3b2d9d69f1d3f3c0/B of 47d6ffc4e6051fed3b2d9d69f1d3f3c0 into a79832a4bf5a4e91b17b60c5e3804c56(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
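The minor compactions above are selected automatically by ExploringCompactionPolicy once the flush completes; for reference, the same table can also be compacted on demand through the public Admin API. Below is a minimal Java sketch, assuming a reachable cluster whose hbase-site.xml is on the classpath; the class name RequestCompaction is invented for the example, while the table name is taken from this log.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestCompaction {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          // Queue a compaction for every store of every region of the table; the region
          // server still applies its own selection policy and throughput limits, exactly
          // as in the ExploringCompactionPolicy / PressureAwareThroughputController lines above.
          admin.compact(table);
          // admin.majorCompact(table) would instead ask for all store files to be rewritten.
        }
      }
    }
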
2024-11-20T17:23:47,144 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 47d6ffc4e6051fed3b2d9d69f1d3f3c0: 2024-11-20T17:23:47,144 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0., storeName=47d6ffc4e6051fed3b2d9d69f1d3f3c0/B, priority=13, startTime=1732123427111; duration=0sec 2024-11-20T17:23:47,144 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:23:47,144 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 47d6ffc4e6051fed3b2d9d69f1d3f3c0:B 2024-11-20T17:23:47,144 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:23:47,145 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:23:47,146 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 47d6ffc4e6051fed3b2d9d69f1d3f3c0/C is initiating minor compaction (all files) 2024-11-20T17:23:47,146 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 47d6ffc4e6051fed3b2d9d69f1d3f3c0/C in TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:47,146 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/4fbf1faa800b4c5b96da6788c0641135, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/ba6f95a5ef654efe95f885c7e100e2cf, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/98a1309976f2422bb4350ffca766e005] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp, totalSize=35.5 K 2024-11-20T17:23:47,146 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 4fbf1faa800b4c5b96da6788c0641135, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732123421081 2024-11-20T17:23:47,146 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting ba6f95a5ef654efe95f885c7e100e2cf, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732123422214 2024-11-20T17:23:47,147 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 98a1309976f2422bb4350ffca766e005, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732123424343 2024-11-20T17:23:47,156 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
47d6ffc4e6051fed3b2d9d69f1d3f3c0#C#compaction#459 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:23:47,156 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/22a4d149ca5c41ce990f90987f077677 is 50, key is test_row_0/C:col10/1732123424343/Put/seqid=0 2024-11-20T17:23:47,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742365_1541 (size=12459) 2024-11-20T17:23:47,165 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/22a4d149ca5c41ce990f90987f077677 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/22a4d149ca5c41ce990f90987f077677 2024-11-20T17:23:47,170 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 47d6ffc4e6051fed3b2d9d69f1d3f3c0/C of 47d6ffc4e6051fed3b2d9d69f1d3f3c0 into 22a4d149ca5c41ce990f90987f077677(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:23:47,170 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 47d6ffc4e6051fed3b2d9d69f1d3f3c0: 2024-11-20T17:23:47,170 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0., storeName=47d6ffc4e6051fed3b2d9d69f1d3f3c0/C, priority=13, startTime=1732123427111; duration=0sec 2024-11-20T17:23:47,170 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:47,170 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 47d6ffc4e6051fed3b2d9d69f1d3f3c0:C 2024-11-20T17:23:47,220 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:47,221 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-20T17:23:47,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 
2024-11-20T17:23:47,221 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2837): Flushing 47d6ffc4e6051fed3b2d9d69f1d3f3c0 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T17:23:47,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=A 2024-11-20T17:23:47,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:47,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=B 2024-11-20T17:23:47,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:47,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=C 2024-11-20T17:23:47,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:47,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112061aca7abfa5940449940bd154c14681b_47d6ffc4e6051fed3b2d9d69f1d3f3c0 is 50, key is test_row_0/A:col10/1732123425469/Put/seqid=0 2024-11-20T17:23:47,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742366_1542 (size=12304) 2024-11-20T17:23:47,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:23:47,237 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112061aca7abfa5940449940bd154c14681b_47d6ffc4e6051fed3b2d9d69f1d3f3c0 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112061aca7abfa5940449940bd154c14681b_47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:47,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/32a01d99a8904bc7aaf1e64b22e562ca, store: [table=TestAcidGuarantees family=A region=47d6ffc4e6051fed3b2d9d69f1d3f3c0] 2024-11-20T17:23:47,238 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/32a01d99a8904bc7aaf1e64b22e562ca is 175, key is test_row_0/A:col10/1732123425469/Put/seqid=0 2024-11-20T17:23:47,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742367_1543 (size=31105) 2024-11-20T17:23:47,538 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 47d6ffc4e6051fed3b2d9d69f1d3f3c0#A#compaction#457 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:23:47,538 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/007d212f64fd4f1c885ad6d28f41c480 is 175, key is test_row_0/A:col10/1732123424343/Put/seqid=0 2024-11-20T17:23:47,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742368_1544 (size=31413) 2024-11-20T17:23:47,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:47,596 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:47,606 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:47,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123487604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:47,606 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:47,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123487604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:47,608 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:47,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123487605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:47,608 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:47,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123487605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:47,642 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=159, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/32a01d99a8904bc7aaf1e64b22e562ca 2024-11-20T17:23:47,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/01537b4fbbfc4fb0a321dddde9df4beb is 50, key is test_row_0/B:col10/1732123425469/Put/seqid=0 2024-11-20T17:23:47,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742369_1545 (size=12151) 2024-11-20T17:23:47,708 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:47,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123487707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:47,708 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:47,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123487707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:47,710 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:47,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123487708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:47,710 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:47,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123487709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:47,910 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:47,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123487909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:47,911 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:47,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123487910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:47,912 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:47,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123487911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:47,913 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:47,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123487911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:47,947 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/007d212f64fd4f1c885ad6d28f41c480 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/007d212f64fd4f1c885ad6d28f41c480 2024-11-20T17:23:47,951 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 47d6ffc4e6051fed3b2d9d69f1d3f3c0/A of 47d6ffc4e6051fed3b2d9d69f1d3f3c0 into 007d212f64fd4f1c885ad6d28f41c480(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
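The repeated RegionTooBusyException warnings above come from HRegion.checkResources rejecting writes while the region's memstore is above its blocking limit; the 512.0 K limit here is a deliberately small test setting, whereas in a normal deployment the blocking size derives from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The HBase client retries this exception on its own, but a writer can also back off explicitly. A minimal sketch, using the row, family and qualifier that appear in the log; the placeholder value, the class name PutWithBackoff, the retry count and the sleep interval are invented for illustration.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithBackoff {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put);   // the client already retries internally before this throws
              break;
            } catch (IOException e) {
              // RegionTooBusyException (and the client's RetriesExhaustedException wrapper)
              // both extend IOException; back off briefly so the region can finish flushing.
              if (attempt >= 5) {
                throw e;
              }
              Thread.sleep(100L * attempt);
            }
          }
        }
      }
    }
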
2024-11-20T17:23:47,951 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 47d6ffc4e6051fed3b2d9d69f1d3f3c0: 2024-11-20T17:23:47,951 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0., storeName=47d6ffc4e6051fed3b2d9d69f1d3f3c0/A, priority=13, startTime=1732123427111; duration=0sec 2024-11-20T17:23:47,951 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:47,951 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 47d6ffc4e6051fed3b2d9d69f1d3f3c0:A 2024-11-20T17:23:48,052 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/01537b4fbbfc4fb0a321dddde9df4beb 2024-11-20T17:23:48,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/91ee9e14885343d2ae54e3b8241f0142 is 50, key is test_row_0/C:col10/1732123425469/Put/seqid=0 2024-11-20T17:23:48,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742370_1546 (size=12151) 2024-11-20T17:23:48,062 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/91ee9e14885343d2ae54e3b8241f0142 2024-11-20T17:23:48,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/32a01d99a8904bc7aaf1e64b22e562ca as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/32a01d99a8904bc7aaf1e64b22e562ca 2024-11-20T17:23:48,068 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/32a01d99a8904bc7aaf1e64b22e562ca, entries=150, sequenceid=159, filesize=30.4 K 2024-11-20T17:23:48,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/01537b4fbbfc4fb0a321dddde9df4beb as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/01537b4fbbfc4fb0a321dddde9df4beb 2024-11-20T17:23:48,071 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/01537b4fbbfc4fb0a321dddde9df4beb, entries=150, sequenceid=159, filesize=11.9 K 2024-11-20T17:23:48,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/91ee9e14885343d2ae54e3b8241f0142 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/91ee9e14885343d2ae54e3b8241f0142 2024-11-20T17:23:48,075 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/91ee9e14885343d2ae54e3b8241f0142, entries=150, sequenceid=159, filesize=11.9 K 2024-11-20T17:23:48,075 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 47d6ffc4e6051fed3b2d9d69f1d3f3c0 in 854ms, sequenceid=159, compaction requested=false 2024-11-20T17:23:48,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2538): Flush status journal for 47d6ffc4e6051fed3b2d9d69f1d3f3c0: 2024-11-20T17:23:48,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 
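The flush that has just finished (pid=162) runs on behalf of a FlushTableProcedure started by the test client, and the records that follow show the client immediately requesting another one. The client-side equivalent is Admin.flush; a minimal sketch, again assuming a reachable cluster, with the table name taken from the log and the class name FlushTable invented for the example.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Ask the master to flush every region of the table. The master runs this as a
          // FlushTableProcedure (cf. procId 161/163 in the log) and fans the work out to the
          // region servers as FlushRegionCallable executions like pid=162 above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
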
2024-11-20T17:23:48,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=162 2024-11-20T17:23:48,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=162 2024-11-20T17:23:48,078 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=162, resume processing ppid=161 2024-11-20T17:23:48,078 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, ppid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0790 sec 2024-11-20T17:23:48,079 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees in 2.0820 sec 2024-11-20T17:23:48,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-20T17:23:48,101 INFO [Thread-2250 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 161 completed 2024-11-20T17:23:48,102 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:23:48,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees 2024-11-20T17:23:48,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-20T17:23:48,103 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:23:48,104 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:23:48,104 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:23:48,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-20T17:23:48,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:48,215 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 47d6ffc4e6051fed3b2d9d69f1d3f3c0 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T17:23:48,215 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=A 2024-11-20T17:23:48,216 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:48,216 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=B 2024-11-20T17:23:48,216 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:48,216 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=C 2024-11-20T17:23:48,216 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:48,222 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120388997edebb54fd183c4a326a3c12e5e_47d6ffc4e6051fed3b2d9d69f1d3f3c0 is 50, key is test_row_0/A:col10/1732123428214/Put/seqid=0 2024-11-20T17:23:48,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742371_1547 (size=12304) 2024-11-20T17:23:48,234 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:48,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123488231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:48,234 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:48,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123488231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:48,235 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:48,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123488232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:48,235 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:48,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123488233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:48,255 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:48,255 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-20T17:23:48,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:48,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:48,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:48,256 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:48,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:48,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:48,336 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:48,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123488335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:48,336 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:48,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123488335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:48,337 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:48,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123488336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:48,337 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:48,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123488336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:48,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-20T17:23:48,407 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:48,408 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-20T17:23:48,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:48,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:48,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:48,408 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:23:48,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:48,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:48,538 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:48,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123488537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:48,538 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:48,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123488537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:48,539 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:48,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123488538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:48,540 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:48,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123488539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:48,560 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:48,560 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-20T17:23:48,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:48,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:48,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:48,561 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:48,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:48,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:48,634 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:23:48,637 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120388997edebb54fd183c4a326a3c12e5e_47d6ffc4e6051fed3b2d9d69f1d3f3c0 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120388997edebb54fd183c4a326a3c12e5e_47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:48,638 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/23d64e2c895e4d698e52f6395b790473, store: [table=TestAcidGuarantees family=A region=47d6ffc4e6051fed3b2d9d69f1d3f3c0] 2024-11-20T17:23:48,638 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/23d64e2c895e4d698e52f6395b790473 is 175, key is test_row_0/A:col10/1732123428214/Put/seqid=0 2024-11-20T17:23:48,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742372_1548 (size=31105) 2024-11-20T17:23:48,642 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=174, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/23d64e2c895e4d698e52f6395b790473 2024-11-20T17:23:48,649 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/89abd3bb191c4278aa8dbc34ab942aa2 is 50, key is test_row_0/B:col10/1732123428214/Put/seqid=0 2024-11-20T17:23:48,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742373_1549 (size=12151) 2024-11-20T17:23:48,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-20T17:23:48,712 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New 
admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:48,712 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-20T17:23:48,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:48,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:48,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:48,713 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:48,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:23:48,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:48,842 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:48,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123488840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:48,843 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:48,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123488840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:48,843 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:48,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123488841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:48,845 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:48,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123488843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:48,864 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:48,865 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-20T17:23:48,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:48,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:48,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:48,865 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:23:48,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:48,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:49,017 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:49,018 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-20T17:23:49,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:49,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:49,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:49,018 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:49,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:49,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:23:49,053 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/89abd3bb191c4278aa8dbc34ab942aa2 2024-11-20T17:23:49,059 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/5e85a50a879d47f5963048fc7813f3be is 50, key is test_row_0/C:col10/1732123428214/Put/seqid=0 2024-11-20T17:23:49,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742374_1550 (size=12151) 2024-11-20T17:23:49,170 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:49,170 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-20T17:23:49,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:49,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:49,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:49,171 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:23:49,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:49,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:49,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-20T17:23:49,323 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:49,324 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-20T17:23:49,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:49,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:49,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:49,324 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:49,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:49,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:49,346 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:49,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123489345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:49,347 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:49,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123489345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:49,348 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:49,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123489347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:49,351 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:49,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123489350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:49,463 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/5e85a50a879d47f5963048fc7813f3be 2024-11-20T17:23:49,467 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/23d64e2c895e4d698e52f6395b790473 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/23d64e2c895e4d698e52f6395b790473 2024-11-20T17:23:49,470 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/23d64e2c895e4d698e52f6395b790473, entries=150, sequenceid=174, filesize=30.4 K 2024-11-20T17:23:49,471 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/89abd3bb191c4278aa8dbc34ab942aa2 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/89abd3bb191c4278aa8dbc34ab942aa2 2024-11-20T17:23:49,474 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/89abd3bb191c4278aa8dbc34ab942aa2, entries=150, sequenceid=174, filesize=11.9 K 2024-11-20T17:23:49,475 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/5e85a50a879d47f5963048fc7813f3be as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/5e85a50a879d47f5963048fc7813f3be 2024-11-20T17:23:49,476 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:49,477 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-20T17:23:49,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:49,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:49,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:49,477 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:49,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:49,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:23:49,479 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/5e85a50a879d47f5963048fc7813f3be, entries=150, sequenceid=174, filesize=11.9 K 2024-11-20T17:23:49,480 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 47d6ffc4e6051fed3b2d9d69f1d3f3c0 in 1264ms, sequenceid=174, compaction requested=true 2024-11-20T17:23:49,480 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 47d6ffc4e6051fed3b2d9d69f1d3f3c0: 2024-11-20T17:23:49,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 47d6ffc4e6051fed3b2d9d69f1d3f3c0:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:23:49,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:49,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 47d6ffc4e6051fed3b2d9d69f1d3f3c0:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:23:49,480 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:23:49,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:49,480 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:23:49,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 47d6ffc4e6051fed3b2d9d69f1d3f3c0:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:23:49,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:23:49,481 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93623 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:23:49,481 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:23:49,481 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): 47d6ffc4e6051fed3b2d9d69f1d3f3c0/A is initiating minor compaction (all files) 2024-11-20T17:23:49,481 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 47d6ffc4e6051fed3b2d9d69f1d3f3c0/B is initiating minor compaction (all files) 2024-11-20T17:23:49,481 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 47d6ffc4e6051fed3b2d9d69f1d3f3c0/A in TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 
2024-11-20T17:23:49,481 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 47d6ffc4e6051fed3b2d9d69f1d3f3c0/B in TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:49,481 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/007d212f64fd4f1c885ad6d28f41c480, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/32a01d99a8904bc7aaf1e64b22e562ca, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/23d64e2c895e4d698e52f6395b790473] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp, totalSize=91.4 K 2024-11-20T17:23:49,481 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/a79832a4bf5a4e91b17b60c5e3804c56, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/01537b4fbbfc4fb0a321dddde9df4beb, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/89abd3bb191c4278aa8dbc34ab942aa2] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp, totalSize=35.9 K 2024-11-20T17:23:49,481 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:49,481 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 
files: [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/007d212f64fd4f1c885ad6d28f41c480, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/32a01d99a8904bc7aaf1e64b22e562ca, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/23d64e2c895e4d698e52f6395b790473] 2024-11-20T17:23:49,482 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 007d212f64fd4f1c885ad6d28f41c480, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732123424343 2024-11-20T17:23:49,482 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 32a01d99a8904bc7aaf1e64b22e562ca, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732123425469 2024-11-20T17:23:49,482 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 23d64e2c895e4d698e52f6395b790473, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732123427602 2024-11-20T17:23:49,482 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting a79832a4bf5a4e91b17b60c5e3804c56, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732123424343 2024-11-20T17:23:49,483 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 01537b4fbbfc4fb0a321dddde9df4beb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732123425469 2024-11-20T17:23:49,483 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 89abd3bb191c4278aa8dbc34ab942aa2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732123427602 2024-11-20T17:23:49,488 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=47d6ffc4e6051fed3b2d9d69f1d3f3c0] 2024-11-20T17:23:49,489 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 47d6ffc4e6051fed3b2d9d69f1d3f3c0#B#compaction#466 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:23:49,489 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/433305bab0b84178b8873e240554a2ef is 50, key is test_row_0/B:col10/1732123428214/Put/seqid=0 2024-11-20T17:23:49,491 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112065b342d85a2e44f2828bb13afd49579b_47d6ffc4e6051fed3b2d9d69f1d3f3c0 store=[table=TestAcidGuarantees family=A region=47d6ffc4e6051fed3b2d9d69f1d3f3c0] 2024-11-20T17:23:49,493 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112065b342d85a2e44f2828bb13afd49579b_47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=[table=TestAcidGuarantees family=A region=47d6ffc4e6051fed3b2d9d69f1d3f3c0] 2024-11-20T17:23:49,493 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112065b342d85a2e44f2828bb13afd49579b_47d6ffc4e6051fed3b2d9d69f1d3f3c0 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=47d6ffc4e6051fed3b2d9d69f1d3f3c0] 2024-11-20T17:23:49,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742375_1551 (size=12561) 2024-11-20T17:23:49,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742376_1552 (size=4469) 2024-11-20T17:23:49,629 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:49,629 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-20T17:23:49,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 
2024-11-20T17:23:49,630 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2837): Flushing 47d6ffc4e6051fed3b2d9d69f1d3f3c0 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T17:23:49,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=A 2024-11-20T17:23:49,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:49,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=B 2024-11-20T17:23:49,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:49,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=C 2024-11-20T17:23:49,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:49,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201da8d512a49645a8856b8d94b64f8e69_47d6ffc4e6051fed3b2d9d69f1d3f3c0 is 50, key is test_row_0/A:col10/1732123428230/Put/seqid=0 2024-11-20T17:23:49,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742377_1553 (size=12304) 2024-11-20T17:23:49,909 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/433305bab0b84178b8873e240554a2ef as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/433305bab0b84178b8873e240554a2ef 2024-11-20T17:23:49,913 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 47d6ffc4e6051fed3b2d9d69f1d3f3c0/B of 47d6ffc4e6051fed3b2d9d69f1d3f3c0 into 433305bab0b84178b8873e240554a2ef(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:23:49,913 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 47d6ffc4e6051fed3b2d9d69f1d3f3c0: 2024-11-20T17:23:49,913 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0., storeName=47d6ffc4e6051fed3b2d9d69f1d3f3c0/B, priority=13, startTime=1732123429480; duration=0sec 2024-11-20T17:23:49,914 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:23:49,914 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 47d6ffc4e6051fed3b2d9d69f1d3f3c0:B 2024-11-20T17:23:49,914 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:23:49,914 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:23:49,915 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 47d6ffc4e6051fed3b2d9d69f1d3f3c0/C is initiating minor compaction (all files) 2024-11-20T17:23:49,915 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 47d6ffc4e6051fed3b2d9d69f1d3f3c0/C in TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:49,915 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/22a4d149ca5c41ce990f90987f077677, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/91ee9e14885343d2ae54e3b8241f0142, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/5e85a50a879d47f5963048fc7813f3be] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp, totalSize=35.9 K 2024-11-20T17:23:49,915 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 22a4d149ca5c41ce990f90987f077677, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732123424343 2024-11-20T17:23:49,915 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 91ee9e14885343d2ae54e3b8241f0142, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732123425469 2024-11-20T17:23:49,916 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 5e85a50a879d47f5963048fc7813f3be, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732123427602 2024-11-20T17:23:49,918 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
47d6ffc4e6051fed3b2d9d69f1d3f3c0#A#compaction#467 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:23:49,918 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/1aa1823fa5c0444fa1b0b79aa6270687 is 175, key is test_row_0/A:col10/1732123428214/Put/seqid=0 2024-11-20T17:23:49,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742378_1554 (size=31515) 2024-11-20T17:23:49,925 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 47d6ffc4e6051fed3b2d9d69f1d3f3c0#C#compaction#469 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:23:49,925 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/fe7bfeec0bdc46f4913720792ca41649 is 50, key is test_row_0/C:col10/1732123428214/Put/seqid=0 2024-11-20T17:23:49,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742379_1555 (size=12561) 2024-11-20T17:23:50,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:23:50,049 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201da8d512a49645a8856b8d94b64f8e69_47d6ffc4e6051fed3b2d9d69f1d3f3c0 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201da8d512a49645a8856b8d94b64f8e69_47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:50,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/cece1fd6239e483eab29e63412f1e76b, store: [table=TestAcidGuarantees family=A region=47d6ffc4e6051fed3b2d9d69f1d3f3c0] 2024-11-20T17:23:50,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/cece1fd6239e483eab29e63412f1e76b is 175, key is test_row_0/A:col10/1732123428230/Put/seqid=0 2024-11-20T17:23:50,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:40823 is added to blk_1073742380_1556 (size=31105) 2024-11-20T17:23:50,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-20T17:23:50,326 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/1aa1823fa5c0444fa1b0b79aa6270687 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/1aa1823fa5c0444fa1b0b79aa6270687 2024-11-20T17:23:50,330 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 47d6ffc4e6051fed3b2d9d69f1d3f3c0/A of 47d6ffc4e6051fed3b2d9d69f1d3f3c0 into 1aa1823fa5c0444fa1b0b79aa6270687(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:23:50,330 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 47d6ffc4e6051fed3b2d9d69f1d3f3c0: 2024-11-20T17:23:50,330 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0., storeName=47d6ffc4e6051fed3b2d9d69f1d3f3c0/A, priority=13, startTime=1732123429480; duration=0sec 2024-11-20T17:23:50,330 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:50,330 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 47d6ffc4e6051fed3b2d9d69f1d3f3c0:A 2024-11-20T17:23:50,333 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/fe7bfeec0bdc46f4913720792ca41649 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/fe7bfeec0bdc46f4913720792ca41649 2024-11-20T17:23:50,336 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 47d6ffc4e6051fed3b2d9d69f1d3f3c0/C of 47d6ffc4e6051fed3b2d9d69f1d3f3c0 into fe7bfeec0bdc46f4913720792ca41649(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
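The compactions recorded above were selected automatically by ExploringCompactionPolicy once the flushed files accumulated in each store. A compaction can also be requested explicitly; the sketch below uses the public Admin API with the table and family names that appear in the log, and is illustrative only.

// Hedged sketch: explicitly requesting compactions for the table seen in the log.
// The automatic minor compactions above did not require this; the calls are shown
// only to illustrate the client-facing counterpart of that server-side work.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactStoreExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Ask for a (minor) compaction of a single column family, e.g. store "C".
      admin.compact(table, Bytes.toBytes("C"));
      // Or rewrite all files of every store in the table with a major compaction.
      admin.majorCompact(table);
    }
  }
}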
2024-11-20T17:23:50,337 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 47d6ffc4e6051fed3b2d9d69f1d3f3c0: 2024-11-20T17:23:50,337 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0., storeName=47d6ffc4e6051fed3b2d9d69f1d3f3c0/C, priority=13, startTime=1732123429480; duration=0sec 2024-11-20T17:23:50,337 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:50,337 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 47d6ffc4e6051fed3b2d9d69f1d3f3c0:C 2024-11-20T17:23:50,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:50,350 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:50,361 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:50,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123490359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:50,361 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:50,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123490359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:50,362 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:50,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123490360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:50,362 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:50,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123490361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:50,454 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=196, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/cece1fd6239e483eab29e63412f1e76b 2024-11-20T17:23:50,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/602daededf634220a6fc70774368b91d is 50, key is test_row_0/B:col10/1732123428230/Put/seqid=0 2024-11-20T17:23:50,464 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:50,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123490462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:50,464 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:50,464 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:50,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123490463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:50,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123490463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:50,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742381_1557 (size=12151) 2024-11-20T17:23:50,666 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:50,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123490665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:50,666 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:50,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123490665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:50,667 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:50,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123490666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:50,866 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/602daededf634220a6fc70774368b91d 2024-11-20T17:23:50,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/dfd19814254a4b6ab7ae7fcf0082a8f5 is 50, key is test_row_0/C:col10/1732123428230/Put/seqid=0 2024-11-20T17:23:50,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742382_1558 (size=12151) 2024-11-20T17:23:50,969 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:50,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123490968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:50,970 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:50,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123490968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:50,970 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:50,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123490968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:51,277 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/dfd19814254a4b6ab7ae7fcf0082a8f5 2024-11-20T17:23:51,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/cece1fd6239e483eab29e63412f1e76b as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/cece1fd6239e483eab29e63412f1e76b 2024-11-20T17:23:51,284 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/cece1fd6239e483eab29e63412f1e76b, entries=150, sequenceid=196, filesize=30.4 K 2024-11-20T17:23:51,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/602daededf634220a6fc70774368b91d as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/602daededf634220a6fc70774368b91d 2024-11-20T17:23:51,288 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/602daededf634220a6fc70774368b91d, entries=150, sequenceid=196, filesize=11.9 K 2024-11-20T17:23:51,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/dfd19814254a4b6ab7ae7fcf0082a8f5 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/dfd19814254a4b6ab7ae7fcf0082a8f5 2024-11-20T17:23:51,291 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/dfd19814254a4b6ab7ae7fcf0082a8f5, entries=150, sequenceid=196, filesize=11.9 K 2024-11-20T17:23:51,292 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 47d6ffc4e6051fed3b2d9d69f1d3f3c0 in 1663ms, sequenceid=196, compaction requested=false 2024-11-20T17:23:51,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2538): Flush status journal for 47d6ffc4e6051fed3b2d9d69f1d3f3c0: 2024-11-20T17:23:51,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 
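The repeated RegionTooBusyException warnings in this stretch come from HRegion.checkResources rejecting writes while the region's memstore is above its blocking limit (512.0 K in this run); that limit is normally derived from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The HBase client already retries such transient failures on its own (governed by hbase.client.retries.number and hbase.client.pause), so the explicit retry loop in the sketch below is purely illustrative; the row, family, and qualifier mirror the test rows in the log, while the value and back-off are assumptions.

// Hedged sketch: a writer that tolerates the RegionTooBusyException logged above.
// The client normally retries internally before surfacing anything; the explicit
// catch and back-off here are not a required pattern, just an illustration.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (RegionTooBusyException e) {
          // Memstore is over its blocking limit; give the flush time to catch up.
          Thread.sleep(200L * (attempt + 1));
        }
      }
    }
  }
}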
2024-11-20T17:23:51,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164 2024-11-20T17:23:51,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=164 2024-11-20T17:23:51,294 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163 2024-11-20T17:23:51,294 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.1890 sec 2024-11-20T17:23:51,296 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees in 3.1930 sec 2024-11-20T17:23:51,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:51,474 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 47d6ffc4e6051fed3b2d9d69f1d3f3c0 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T17:23:51,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=A 2024-11-20T17:23:51,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:51,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=B 2024-11-20T17:23:51,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:51,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=C 2024-11-20T17:23:51,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:51,481 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112072663f35cfa949c7a4686c91f2b9de1f_47d6ffc4e6051fed3b2d9d69f1d3f3c0 is 50, key is test_row_0/A:col10/1732123431473/Put/seqid=0 2024-11-20T17:23:51,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742383_1559 (size=12304) 2024-11-20T17:23:51,490 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:23:51,493 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112072663f35cfa949c7a4686c91f2b9de1f_47d6ffc4e6051fed3b2d9d69f1d3f3c0 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112072663f35cfa949c7a4686c91f2b9de1f_47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:51,495 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/e6aae4a34dce47d1a66ed14b569071e3, store: [table=TestAcidGuarantees family=A region=47d6ffc4e6051fed3b2d9d69f1d3f3c0] 2024-11-20T17:23:51,495 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/e6aae4a34dce47d1a66ed14b569071e3 is 175, key is test_row_0/A:col10/1732123431473/Put/seqid=0 2024-11-20T17:23:51,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742384_1560 (size=31105) 2024-11-20T17:23:51,504 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:51,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123491501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:51,504 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:51,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123491501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:51,506 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:51,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123491503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:51,606 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:51,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123491605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:51,607 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:51,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123491605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:51,609 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:51,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123491607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:51,809 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:51,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123491807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:51,809 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:51,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123491808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:51,811 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:51,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123491810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:51,899 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=214, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/e6aae4a34dce47d1a66ed14b569071e3 2024-11-20T17:23:51,905 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/9300b01632cf4244808a1b635b31b340 is 50, key is test_row_0/B:col10/1732123431473/Put/seqid=0 2024-11-20T17:23:51,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742385_1561 (size=12151) 2024-11-20T17:23:52,111 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:52,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123492111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:52,113 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:52,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123492112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:52,115 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:52,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123492114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:52,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-20T17:23:52,207 INFO [Thread-2250 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 163 completed 2024-11-20T17:23:52,208 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:23:52,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees 2024-11-20T17:23:52,210 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:23:52,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-20T17:23:52,210 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:23:52,210 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:23:52,309 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/9300b01632cf4244808a1b635b31b340 2024-11-20T17:23:52,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to 
see if procedure is done pid=165 2024-11-20T17:23:52,316 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/42d0be76c8e847c388cd51ff1dbbab70 is 50, key is test_row_0/C:col10/1732123431473/Put/seqid=0 2024-11-20T17:23:52,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742386_1562 (size=12151) 2024-11-20T17:23:52,362 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:52,362 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-20T17:23:52,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:52,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:52,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:52,362 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:52,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:52,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:52,381 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:52,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1732123492380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:52,382 DEBUG [Thread-2244 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4150 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0., hostname=d514dc944523,40121,1732123262111, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:23:52,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-20T17:23:52,514 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:52,515 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-20T17:23:52,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:52,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:52,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:52,515 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:52,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:52,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:52,615 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:52,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123492614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:52,618 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:52,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123492616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:52,618 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:52,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123492617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:52,667 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:52,667 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-20T17:23:52,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:52,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:52,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:52,667 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:52,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:52,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:52,720 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/42d0be76c8e847c388cd51ff1dbbab70 2024-11-20T17:23:52,724 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/e6aae4a34dce47d1a66ed14b569071e3 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/e6aae4a34dce47d1a66ed14b569071e3 2024-11-20T17:23:52,727 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/e6aae4a34dce47d1a66ed14b569071e3, entries=150, sequenceid=214, filesize=30.4 K 2024-11-20T17:23:52,728 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/9300b01632cf4244808a1b635b31b340 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/9300b01632cf4244808a1b635b31b340 2024-11-20T17:23:52,731 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/9300b01632cf4244808a1b635b31b340, entries=150, 
sequenceid=214, filesize=11.9 K 2024-11-20T17:23:52,731 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/42d0be76c8e847c388cd51ff1dbbab70 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/42d0be76c8e847c388cd51ff1dbbab70 2024-11-20T17:23:52,734 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/42d0be76c8e847c388cd51ff1dbbab70, entries=150, sequenceid=214, filesize=11.9 K 2024-11-20T17:23:52,735 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 47d6ffc4e6051fed3b2d9d69f1d3f3c0 in 1261ms, sequenceid=214, compaction requested=true 2024-11-20T17:23:52,735 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 47d6ffc4e6051fed3b2d9d69f1d3f3c0: 2024-11-20T17:23:52,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 47d6ffc4e6051fed3b2d9d69f1d3f3c0:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:23:52,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:52,735 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:23:52,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 47d6ffc4e6051fed3b2d9d69f1d3f3c0:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:23:52,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:52,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 47d6ffc4e6051fed3b2d9d69f1d3f3c0:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:23:52,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:23:52,735 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:23:52,736 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93725 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:23:52,736 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:23:52,736 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] 
regionserver.HStore(1540): 47d6ffc4e6051fed3b2d9d69f1d3f3c0/A is initiating minor compaction (all files) 2024-11-20T17:23:52,736 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 47d6ffc4e6051fed3b2d9d69f1d3f3c0/B is initiating minor compaction (all files) 2024-11-20T17:23:52,736 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 47d6ffc4e6051fed3b2d9d69f1d3f3c0/A in TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:52,736 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 47d6ffc4e6051fed3b2d9d69f1d3f3c0/B in TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:52,736 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/1aa1823fa5c0444fa1b0b79aa6270687, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/cece1fd6239e483eab29e63412f1e76b, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/e6aae4a34dce47d1a66ed14b569071e3] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp, totalSize=91.5 K 2024-11-20T17:23:52,736 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/433305bab0b84178b8873e240554a2ef, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/602daededf634220a6fc70774368b91d, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/9300b01632cf4244808a1b635b31b340] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp, totalSize=36.0 K 2024-11-20T17:23:52,736 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:52,736 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 
files: [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/1aa1823fa5c0444fa1b0b79aa6270687, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/cece1fd6239e483eab29e63412f1e76b, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/e6aae4a34dce47d1a66ed14b569071e3] 2024-11-20T17:23:52,736 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 433305bab0b84178b8873e240554a2ef, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732123427602 2024-11-20T17:23:52,736 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1aa1823fa5c0444fa1b0b79aa6270687, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732123427602 2024-11-20T17:23:52,737 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 602daededf634220a6fc70774368b91d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732123428230 2024-11-20T17:23:52,737 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting cece1fd6239e483eab29e63412f1e76b, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732123428230 2024-11-20T17:23:52,737 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 9300b01632cf4244808a1b635b31b340, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732123430358 2024-11-20T17:23:52,737 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting e6aae4a34dce47d1a66ed14b569071e3, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732123430358 2024-11-20T17:23:52,753 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 47d6ffc4e6051fed3b2d9d69f1d3f3c0#B#compaction#475 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:23:52,753 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/37c650cdf1ea4dcdacaf1f1bcf7c296d is 50, key is test_row_0/B:col10/1732123431473/Put/seqid=0 2024-11-20T17:23:52,764 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=47d6ffc4e6051fed3b2d9d69f1d3f3c0] 2024-11-20T17:23:52,773 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112071e1d909aefa4c9ca3021c91fec166ed_47d6ffc4e6051fed3b2d9d69f1d3f3c0 store=[table=TestAcidGuarantees family=A region=47d6ffc4e6051fed3b2d9d69f1d3f3c0] 2024-11-20T17:23:52,775 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112071e1d909aefa4c9ca3021c91fec166ed_47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=[table=TestAcidGuarantees family=A region=47d6ffc4e6051fed3b2d9d69f1d3f3c0] 2024-11-20T17:23:52,775 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112071e1d909aefa4c9ca3021c91fec166ed_47d6ffc4e6051fed3b2d9d69f1d3f3c0 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=47d6ffc4e6051fed3b2d9d69f1d3f3c0] 2024-11-20T17:23:52,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742387_1563 (size=12663) 2024-11-20T17:23:52,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742388_1564 (size=4469) 2024-11-20T17:23:52,780 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 47d6ffc4e6051fed3b2d9d69f1d3f3c0#A#compaction#476 average throughput is 1.53 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:23:52,781 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/37c650cdf1ea4dcdacaf1f1bcf7c296d as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/37c650cdf1ea4dcdacaf1f1bcf7c296d 2024-11-20T17:23:52,781 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/706e347b50b74e529e7bdd73098e2811 is 175, key is test_row_0/A:col10/1732123431473/Put/seqid=0 2024-11-20T17:23:52,786 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 47d6ffc4e6051fed3b2d9d69f1d3f3c0/B of 47d6ffc4e6051fed3b2d9d69f1d3f3c0 into 37c650cdf1ea4dcdacaf1f1bcf7c296d(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:23:52,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742389_1565 (size=31617) 2024-11-20T17:23:52,786 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 47d6ffc4e6051fed3b2d9d69f1d3f3c0: 2024-11-20T17:23:52,786 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0., storeName=47d6ffc4e6051fed3b2d9d69f1d3f3c0/B, priority=13, startTime=1732123432735; duration=0sec 2024-11-20T17:23:52,786 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:23:52,786 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 47d6ffc4e6051fed3b2d9d69f1d3f3c0:B 2024-11-20T17:23:52,786 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:23:52,787 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:23:52,787 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 47d6ffc4e6051fed3b2d9d69f1d3f3c0/C is initiating minor compaction (all files) 2024-11-20T17:23:52,787 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 47d6ffc4e6051fed3b2d9d69f1d3f3c0/C in TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 
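The selections logged above ("Exploring compaction algorithm has selected 3 files ... after considering 1 permutations with 1 in ratio", "47d6ffc4e6051fed3b2d9d69f1d3f3c0/C is initiating minor compaction (all files)") are produced by ExploringCompactionPolicy, which picks a set of eligible store files within the configured min/max file counts and size ratio. The sketch below shows the standard HBase 2.x configuration knobs that drive that selection; it is an illustrative setup only, not the configuration this test run actually uses, and the chosen values are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSelectionTuning {
  public static Configuration tunedConf() {
    Configuration conf = HBaseConfiguration.create();
    // Minimum and maximum number of store files picked per compaction selection.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Size ratio: a file stays in a selection only if it is not much larger than
    // the combined size of the other files in that selection.
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    // Store-file count at which further flushes are delayed ("16 blocking" in the log).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    return conf;
  }
}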
2024-11-20T17:23:52,787 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/fe7bfeec0bdc46f4913720792ca41649, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/dfd19814254a4b6ab7ae7fcf0082a8f5, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/42d0be76c8e847c388cd51ff1dbbab70] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp, totalSize=36.0 K 2024-11-20T17:23:52,788 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting fe7bfeec0bdc46f4913720792ca41649, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732123427602 2024-11-20T17:23:52,788 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting dfd19814254a4b6ab7ae7fcf0082a8f5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732123428230 2024-11-20T17:23:52,788 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 42d0be76c8e847c388cd51ff1dbbab70, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732123430358 2024-11-20T17:23:52,791 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/706e347b50b74e529e7bdd73098e2811 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/706e347b50b74e529e7bdd73098e2811 2024-11-20T17:23:52,796 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 47d6ffc4e6051fed3b2d9d69f1d3f3c0/A of 47d6ffc4e6051fed3b2d9d69f1d3f3c0 into 706e347b50b74e529e7bdd73098e2811(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:23:52,796 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 47d6ffc4e6051fed3b2d9d69f1d3f3c0: 2024-11-20T17:23:52,796 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0., storeName=47d6ffc4e6051fed3b2d9d69f1d3f3c0/A, priority=13, startTime=1732123432735; duration=0sec 2024-11-20T17:23:52,796 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 47d6ffc4e6051fed3b2d9d69f1d3f3c0#C#compaction#477 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:23:52,796 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:52,796 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 47d6ffc4e6051fed3b2d9d69f1d3f3c0:A 2024-11-20T17:23:52,797 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/54cbb4370c4140d3b57bdd5219cbc3f6 is 50, key is test_row_0/C:col10/1732123431473/Put/seqid=0 2024-11-20T17:23:52,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742390_1566 (size=12663) 2024-11-20T17:23:52,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-20T17:23:52,819 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:52,819 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-20T17:23:52,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 
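The "average throughput is ... total limit is 50.00 MB/second" lines for compactions #475-#477 come from PressureAwareThroughputController, which throttles compaction I/O between a lower and an upper bound depending on store-file pressure; 50 MB/s appears to be the no-pressure (lower) bound in effect here. A minimal server-side sketch follows, assuming the standard 2.x property names; the values simply mirror the limit printed above and are otherwise illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputTuning {
  public static Configuration tunedConf() {
    Configuration conf = HBaseConfiguration.create();
    // Bounds between which the controller adjusts the compaction write limit as
    // store-file pressure rises; 50 MB/s is the limit reported in the log above.
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    // The controller implementation itself is pluggable via
    // "hbase.regionserver.throughput.controller".
    return conf;
  }
}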
2024-11-20T17:23:52,820 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2837): Flushing 47d6ffc4e6051fed3b2d9d69f1d3f3c0 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-20T17:23:52,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=A 2024-11-20T17:23:52,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:52,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=B 2024-11-20T17:23:52,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:52,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=C 2024-11-20T17:23:52,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:52,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112067cb4210a3804c0096f9dec52bd5ce0e_47d6ffc4e6051fed3b2d9d69f1d3f3c0 is 50, key is test_row_0/A:col10/1732123431496/Put/seqid=0 2024-11-20T17:23:52,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742391_1567 (size=12304) 2024-11-20T17:23:52,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:23:52,837 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112067cb4210a3804c0096f9dec52bd5ce0e_47d6ffc4e6051fed3b2d9d69f1d3f3c0 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112067cb4210a3804c0096f9dec52bd5ce0e_47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:52,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/67bbc5b398cc4d68b1f7fa4522345e8f, store: [table=TestAcidGuarantees family=A region=47d6ffc4e6051fed3b2d9d69f1d3f3c0] 2024-11-20T17:23:52,839 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/67bbc5b398cc4d68b1f7fa4522345e8f is 175, key is test_row_0/A:col10/1732123431496/Put/seqid=0 2024-11-20T17:23:52,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742392_1568 (size=31105) 2024-11-20T17:23:53,213 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/54cbb4370c4140d3b57bdd5219cbc3f6 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/54cbb4370c4140d3b57bdd5219cbc3f6 2024-11-20T17:23:53,217 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 47d6ffc4e6051fed3b2d9d69f1d3f3c0/C of 47d6ffc4e6051fed3b2d9d69f1d3f3c0 into 54cbb4370c4140d3b57bdd5219cbc3f6(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:23:53,217 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 47d6ffc4e6051fed3b2d9d69f1d3f3c0: 2024-11-20T17:23:53,217 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0., storeName=47d6ffc4e6051fed3b2d9d69f1d3f3c0/C, priority=13, startTime=1732123432735; duration=0sec 2024-11-20T17:23:53,217 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:53,217 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 47d6ffc4e6051fed3b2d9d69f1d3f3c0:C 2024-11-20T17:23:53,242 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=237, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/67bbc5b398cc4d68b1f7fa4522345e8f 2024-11-20T17:23:53,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/35f1a88303a54c9a8753add0c963f886 is 50, key is test_row_0/B:col10/1732123431496/Put/seqid=0 2024-11-20T17:23:53,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742393_1569 (size=12151) 2024-11-20T17:23:53,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 
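Family A of this table is MOB-enabled: its flushes go through DefaultMobStoreFlusher, its compactions through DefaultMobStoreCompactor, and the per-value MOB data is written under the mobdir path seen above. A hedged sketch of how such a table could be declared with the standard HBase client API; the MOB threshold value is an assumption for illustration, not the one AcidGuaranteesTestTool actually configures.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
      // Family "A" stored as MOB: qualifying values are written to separate MOB files
      // under /mobdir, flushed by DefaultMobStoreFlusher as seen in the log.
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
          .setMobEnabled(true)
          .setMobThreshold(4 * 1024)   // illustrative threshold, not the test's value
          .build());
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"));
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"));
      admin.createTable(table.build());
    }
  }
}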
2024-11-20T17:23:53,618 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:53,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:53,634 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:53,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123493631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:53,634 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:53,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123493632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:53,635 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:53,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123493633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:53,652 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/35f1a88303a54c9a8753add0c963f886 2024-11-20T17:23:53,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/6cb479a5bea44f8e873d657507e65e0b is 50, key is test_row_0/C:col10/1732123431496/Put/seqid=0 2024-11-20T17:23:53,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742394_1570 (size=12151) 2024-11-20T17:23:53,736 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:53,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123493735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:53,737 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:53,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123493735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:53,737 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:53,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123493735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:53,939 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:53,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123493938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:53,939 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:53,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123493938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:53,939 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:53,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123493939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:53,944 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:53,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46396 deadline: 1732123493944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:53,945 DEBUG [Thread-2248 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18245 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0., hostname=d514dc944523,40121,1732123262111, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:23:54,063 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/6cb479a5bea44f8e873d657507e65e0b 2024-11-20T17:23:54,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/67bbc5b398cc4d68b1f7fa4522345e8f as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/67bbc5b398cc4d68b1f7fa4522345e8f 2024-11-20T17:23:54,070 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/67bbc5b398cc4d68b1f7fa4522345e8f, entries=150, sequenceid=237, filesize=30.4 K 2024-11-20T17:23:54,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/35f1a88303a54c9a8753add0c963f886 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/35f1a88303a54c9a8753add0c963f886 2024-11-20T17:23:54,074 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/35f1a88303a54c9a8753add0c963f886, entries=150, sequenceid=237, filesize=11.9 K 2024-11-20T17:23:54,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/6cb479a5bea44f8e873d657507e65e0b as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/6cb479a5bea44f8e873d657507e65e0b 2024-11-20T17:23:54,077 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/6cb479a5bea44f8e873d657507e65e0b, entries=150, sequenceid=237, filesize=11.9 K 2024-11-20T17:23:54,078 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 47d6ffc4e6051fed3b2d9d69f1d3f3c0 in 1258ms, sequenceid=237, compaction requested=false 2024-11-20T17:23:54,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2538): Flush status journal for 47d6ffc4e6051fed3b2d9d69f1d3f3c0: 2024-11-20T17:23:54,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 
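The repeated RegionTooBusyException entries ("Over memstore limit=512.0 K") are HRegion.checkResources() rejecting new writes once the region's memstore passes its blocking size, i.e. the configured flush size times the block multiplier; the writer threads then back off and retry through RpcRetryingCallerImpl ("tries=8, retries=16, started=18245 ms ago"). The sketch below shows where a 512 K blocking size plausibly comes from and the client-side retry knobs involved; the 128 KB flush size is an assumption for illustration (the default multiplier is 4), not necessarily the value this test sets.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed test-sized flush threshold (128 KB) and the default block multiplier (4).
    long flushSize = 128L * 1024;                                    // hbase.hregion.memstore.flush.size
    int blockMultiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingSize = flushSize * blockMultiplier;                 // 512 KB, as reported in the log
    System.out.println("blocking memstore size = " + blockingSize + " bytes");

    // Client-side retry behaviour surfaced as "tries=8, retries=16" by RpcRetryingCallerImpl:
    conf.setInt("hbase.client.retries.number", 16);  // maximum retries before giving up
    conf.setLong("hbase.client.pause", 100);         // base backoff in ms between retries
  }
}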
2024-11-20T17:23:54,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166 2024-11-20T17:23:54,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=166 2024-11-20T17:23:54,080 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=165 2024-11-20T17:23:54,080 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8690 sec 2024-11-20T17:23:54,082 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees in 1.8730 sec 2024-11-20T17:23:54,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:54,242 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 47d6ffc4e6051fed3b2d9d69f1d3f3c0 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-20T17:23:54,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=A 2024-11-20T17:23:54,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:54,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=B 2024-11-20T17:23:54,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:54,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=C 2024-11-20T17:23:54,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:54,248 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a145bd79a97348f0a15800c6bda6b8b6_47d6ffc4e6051fed3b2d9d69f1d3f3c0 is 50, key is test_row_0/A:col10/1732123434241/Put/seqid=0 2024-11-20T17:23:54,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742395_1571 (size=14794) 2024-11-20T17:23:54,261 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:54,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123494259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:54,262 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:54,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123494260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:54,262 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:54,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123494261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:54,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-20T17:23:54,315 INFO [Thread-2250 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 165 completed 2024-11-20T17:23:54,316 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:23:54,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees 2024-11-20T17:23:54,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T17:23:54,317 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:23:54,318 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:23:54,318 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:23:54,364 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:54,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123494361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:54,364 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:54,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123494363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:54,365 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:54,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123494363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:54,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T17:23:54,469 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:54,470 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-20T17:23:54,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:54,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:54,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:54,470 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:54,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:54,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:54,566 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:54,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123494565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:54,567 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:54,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123494565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:54,567 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:54,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123494566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:54,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T17:23:54,622 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:54,623 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-20T17:23:54,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:54,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:54,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:54,623 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:54,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:54,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:54,653 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:23:54,657 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a145bd79a97348f0a15800c6bda6b8b6_47d6ffc4e6051fed3b2d9d69f1d3f3c0 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a145bd79a97348f0a15800c6bda6b8b6_47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:54,658 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/63e47fe0466e4908986367ead7e5bae1, store: [table=TestAcidGuarantees family=A region=47d6ffc4e6051fed3b2d9d69f1d3f3c0] 2024-11-20T17:23:54,658 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/63e47fe0466e4908986367ead7e5bae1 is 175, key is test_row_0/A:col10/1732123434241/Put/seqid=0 2024-11-20T17:23:54,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742396_1572 (size=39749) 2024-11-20T17:23:54,775 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:54,775 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-20T17:23:54,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:54,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:54,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 
2024-11-20T17:23:54,776 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:54,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:54,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:54,868 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:54,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123494868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:54,871 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:54,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123494869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:54,871 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:54,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123494870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:54,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T17:23:54,928 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:54,928 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-20T17:23:54,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:54,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:54,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:54,929 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:54,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:54,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:55,064 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=254, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/63e47fe0466e4908986367ead7e5bae1 2024-11-20T17:23:55,070 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/638396cc632a4100850cb0cc15531217 is 50, key is test_row_0/B:col10/1732123434241/Put/seqid=0 2024-11-20T17:23:55,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742397_1573 (size=12151) 2024-11-20T17:23:55,081 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:55,081 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-20T17:23:55,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:55,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:55,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:55,082 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:55,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:55,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:55,233 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:55,234 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-20T17:23:55,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:55,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:55,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:55,234 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:55,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:55,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:55,371 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:55,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1732123495370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:55,373 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:55,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46368 deadline: 1732123495372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:55,374 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:23:55,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40121 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46426 deadline: 1732123495373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 2024-11-20T17:23:55,386 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:55,386 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-20T17:23:55,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:55,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:55,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:55,387 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:55,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:55,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:55,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T17:23:55,474 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=254 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/638396cc632a4100850cb0cc15531217 2024-11-20T17:23:55,481 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/802f523fbed04ca8bbe7e80e3afd13b5 is 50, key is test_row_0/C:col10/1732123434241/Put/seqid=0 2024-11-20T17:23:55,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742398_1574 (size=12151) 2024-11-20T17:23:55,538 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:55,539 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-20T17:23:55,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:55,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:55,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:55,539 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:55,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:55,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:55,665 DEBUG [Thread-2257 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x06556601 to 127.0.0.1:55266 2024-11-20T17:23:55,665 DEBUG [Thread-2257 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:23:55,665 DEBUG [Thread-2253 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x53305d9b to 127.0.0.1:55266 2024-11-20T17:23:55,665 DEBUG [Thread-2253 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:23:55,665 DEBUG [Thread-2251 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5d836f78 to 127.0.0.1:55266 2024-11-20T17:23:55,665 DEBUG [Thread-2251 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:23:55,666 DEBUG [Thread-2255 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6bb6288a to 127.0.0.1:55266 2024-11-20T17:23:55,667 DEBUG [Thread-2255 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:23:55,667 DEBUG [Thread-2259 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x458a85fd to 127.0.0.1:55266 2024-11-20T17:23:55,667 DEBUG [Thread-2259 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:23:55,691 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:55,691 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-20T17:23:55,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 
2024-11-20T17:23:55,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. as already flushing 2024-11-20T17:23:55,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:55,692 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:55,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:23:55,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:55,843 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:55,843 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-20T17:23:55,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:55,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 
as already flushing 2024-11-20T17:23:55,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:55,844 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:55,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:55,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:23:55,885 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=254 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/802f523fbed04ca8bbe7e80e3afd13b5 2024-11-20T17:23:55,888 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/63e47fe0466e4908986367ead7e5bae1 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/63e47fe0466e4908986367ead7e5bae1 2024-11-20T17:23:55,891 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/63e47fe0466e4908986367ead7e5bae1, entries=200, sequenceid=254, filesize=38.8 K 2024-11-20T17:23:55,892 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/638396cc632a4100850cb0cc15531217 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/638396cc632a4100850cb0cc15531217 2024-11-20T17:23:55,894 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/638396cc632a4100850cb0cc15531217, entries=150, sequenceid=254, filesize=11.9 K 2024-11-20T17:23:55,895 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/802f523fbed04ca8bbe7e80e3afd13b5 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/802f523fbed04ca8bbe7e80e3afd13b5 2024-11-20T17:23:55,897 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/802f523fbed04ca8bbe7e80e3afd13b5, entries=150, sequenceid=254, filesize=11.9 K 2024-11-20T17:23:55,898 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 47d6ffc4e6051fed3b2d9d69f1d3f3c0 in 1656ms, sequenceid=254, compaction requested=true 2024-11-20T17:23:55,898 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 47d6ffc4e6051fed3b2d9d69f1d3f3c0: 2024-11-20T17:23:55,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 47d6ffc4e6051fed3b2d9d69f1d3f3c0:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:23:55,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:55,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 47d6ffc4e6051fed3b2d9d69f1d3f3c0:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:23:55,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:55,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 47d6ffc4e6051fed3b2d9d69f1d3f3c0:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:23:55,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:23:55,898 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:23:55,898 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:23:55,899 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:23:55,899 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 102471 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:23:55,899 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 47d6ffc4e6051fed3b2d9d69f1d3f3c0/B is initiating minor compaction (all files) 2024-11-20T17:23:55,899 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1540): 47d6ffc4e6051fed3b2d9d69f1d3f3c0/A is initiating minor compaction (all files) 2024-11-20T17:23:55,899 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 47d6ffc4e6051fed3b2d9d69f1d3f3c0/B in TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:55,899 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 47d6ffc4e6051fed3b2d9d69f1d3f3c0/A in TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:55,899 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/37c650cdf1ea4dcdacaf1f1bcf7c296d, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/35f1a88303a54c9a8753add0c963f886, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/638396cc632a4100850cb0cc15531217] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp, totalSize=36.1 K 2024-11-20T17:23:55,899 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/706e347b50b74e529e7bdd73098e2811, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/67bbc5b398cc4d68b1f7fa4522345e8f, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/63e47fe0466e4908986367ead7e5bae1] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp, totalSize=100.1 K 2024-11-20T17:23:55,899 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:23:55,899 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 
files: [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/706e347b50b74e529e7bdd73098e2811, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/67bbc5b398cc4d68b1f7fa4522345e8f, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/63e47fe0466e4908986367ead7e5bae1] 2024-11-20T17:23:55,899 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 37c650cdf1ea4dcdacaf1f1bcf7c296d, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732123430358 2024-11-20T17:23:55,899 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 706e347b50b74e529e7bdd73098e2811, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732123430358 2024-11-20T17:23:55,900 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 35f1a88303a54c9a8753add0c963f886, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732123431496 2024-11-20T17:23:55,900 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 67bbc5b398cc4d68b1f7fa4522345e8f, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732123431496 2024-11-20T17:23:55,900 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 638396cc632a4100850cb0cc15531217, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1732123433629 2024-11-20T17:23:55,900 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] compactions.Compactor(224): Compacting 63e47fe0466e4908986367ead7e5bae1, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1732123433629 2024-11-20T17:23:55,905 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=47d6ffc4e6051fed3b2d9d69f1d3f3c0] 2024-11-20T17:23:55,905 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 47d6ffc4e6051fed3b2d9d69f1d3f3c0#B#compaction#484 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:23:55,905 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/f132adff399f46929a12d5c55a09724e is 50, key is test_row_0/B:col10/1732123434241/Put/seqid=0 2024-11-20T17:23:55,906 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112029b1ed2941194ee5900824902261b7b3_47d6ffc4e6051fed3b2d9d69f1d3f3c0 store=[table=TestAcidGuarantees family=A region=47d6ffc4e6051fed3b2d9d69f1d3f3c0] 2024-11-20T17:23:55,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742399_1575 (size=12765) 2024-11-20T17:23:55,910 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112029b1ed2941194ee5900824902261b7b3_47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=[table=TestAcidGuarantees family=A region=47d6ffc4e6051fed3b2d9d69f1d3f3c0] 2024-11-20T17:23:55,910 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112029b1ed2941194ee5900824902261b7b3_47d6ffc4e6051fed3b2d9d69f1d3f3c0 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=47d6ffc4e6051fed3b2d9d69f1d3f3c0] 2024-11-20T17:23:55,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742400_1576 (size=4469) 2024-11-20T17:23:55,995 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:23:55,996 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40121 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-20T17:23:55,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 
2024-11-20T17:23:55,996 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2837): Flushing 47d6ffc4e6051fed3b2d9d69f1d3f3c0 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-20T17:23:55,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=A 2024-11-20T17:23:55,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:55,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=B 2024-11-20T17:23:55,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:55,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=C 2024-11-20T17:23:55,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:23:56,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201a41803c31fb42e083e1b68b775e51c7_47d6ffc4e6051fed3b2d9d69f1d3f3c0 is 50, key is test_row_0/A:col10/1732123434260/Put/seqid=0 2024-11-20T17:23:56,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742401_1577 (size=12454) 2024-11-20T17:23:56,312 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/f132adff399f46929a12d5c55a09724e as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/f132adff399f46929a12d5c55a09724e 2024-11-20T17:23:56,314 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 47d6ffc4e6051fed3b2d9d69f1d3f3c0#A#compaction#485 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:23:56,314 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/fce9647aaba84b5596cca978d70272eb is 175, key is test_row_0/A:col10/1732123434241/Put/seqid=0 2024-11-20T17:23:56,316 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 47d6ffc4e6051fed3b2d9d69f1d3f3c0/B of 47d6ffc4e6051fed3b2d9d69f1d3f3c0 into f132adff399f46929a12d5c55a09724e(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:23:56,316 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 47d6ffc4e6051fed3b2d9d69f1d3f3c0: 2024-11-20T17:23:56,316 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0., storeName=47d6ffc4e6051fed3b2d9d69f1d3f3c0/B, priority=13, startTime=1732123435898; duration=0sec 2024-11-20T17:23:56,316 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:23:56,316 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 47d6ffc4e6051fed3b2d9d69f1d3f3c0:B 2024-11-20T17:23:56,316 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:23:56,317 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:23:56,317 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1540): 47d6ffc4e6051fed3b2d9d69f1d3f3c0/C is initiating minor compaction (all files) 2024-11-20T17:23:56,317 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 47d6ffc4e6051fed3b2d9d69f1d3f3c0/C in TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 
2024-11-20T17:23:56,317 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/54cbb4370c4140d3b57bdd5219cbc3f6, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/6cb479a5bea44f8e873d657507e65e0b, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/802f523fbed04ca8bbe7e80e3afd13b5] into tmpdir=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp, totalSize=36.1 K 2024-11-20T17:23:56,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742402_1578 (size=31719) 2024-11-20T17:23:56,318 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 54cbb4370c4140d3b57bdd5219cbc3f6, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732123430358 2024-11-20T17:23:56,318 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 6cb479a5bea44f8e873d657507e65e0b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732123431496 2024-11-20T17:23:56,318 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] compactions.Compactor(224): Compacting 802f523fbed04ca8bbe7e80e3afd13b5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1732123433629 2024-11-20T17:23:56,325 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 47d6ffc4e6051fed3b2d9d69f1d3f3c0#C#compaction#487 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:23:56,325 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/4501f12ba0424b14807e4c5e2b2b3968 is 50, key is test_row_0/C:col10/1732123434241/Put/seqid=0 2024-11-20T17:23:56,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742403_1579 (size=12765) 2024-11-20T17:23:56,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40121 {}] regionserver.HRegion(8581): Flush requested on 47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:56,374 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 
as already flushing 2024-11-20T17:23:56,374 DEBUG [Thread-2246 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1584f18a to 127.0.0.1:55266 2024-11-20T17:23:56,375 DEBUG [Thread-2246 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:23:56,378 DEBUG [Thread-2240 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0d5efb7a to 127.0.0.1:55266 2024-11-20T17:23:56,378 DEBUG [Thread-2240 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:23:56,380 DEBUG [Thread-2242 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7fc332d8 to 127.0.0.1:55266 2024-11-20T17:23:56,380 DEBUG [Thread-2242 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:23:56,389 DEBUG [Thread-2244 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x17327621 to 127.0.0.1:55266 2024-11-20T17:23:56,389 DEBUG [Thread-2244 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:23:56,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:23:56,408 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201a41803c31fb42e083e1b68b775e51c7_47d6ffc4e6051fed3b2d9d69f1d3f3c0 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201a41803c31fb42e083e1b68b775e51c7_47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:23:56,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/18aa50a345094707805367b4babf6487, store: [table=TestAcidGuarantees family=A region=47d6ffc4e6051fed3b2d9d69f1d3f3c0] 2024-11-20T17:23:56,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/18aa50a345094707805367b4babf6487 is 175, key is test_row_0/A:col10/1732123434260/Put/seqid=0 2024-11-20T17:23:56,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742404_1580 (size=31255) 2024-11-20T17:23:56,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T17:23:56,722 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/fce9647aaba84b5596cca978d70272eb as 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/fce9647aaba84b5596cca978d70272eb 2024-11-20T17:23:56,725 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 47d6ffc4e6051fed3b2d9d69f1d3f3c0/A of 47d6ffc4e6051fed3b2d9d69f1d3f3c0 into fce9647aaba84b5596cca978d70272eb(size=31.0 K), total size for store is 31.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:23:56,725 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 47d6ffc4e6051fed3b2d9d69f1d3f3c0: 2024-11-20T17:23:56,725 INFO [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0., storeName=47d6ffc4e6051fed3b2d9d69f1d3f3c0/A, priority=13, startTime=1732123435898; duration=0sec 2024-11-20T17:23:56,725 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:56,725 DEBUG [RS:0;d514dc944523:40121-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 47d6ffc4e6051fed3b2d9d69f1d3f3c0:A 2024-11-20T17:23:56,731 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/4501f12ba0424b14807e4c5e2b2b3968 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/4501f12ba0424b14807e4c5e2b2b3968 2024-11-20T17:23:56,734 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 47d6ffc4e6051fed3b2d9d69f1d3f3c0/C of 47d6ffc4e6051fed3b2d9d69f1d3f3c0 into 4501f12ba0424b14807e4c5e2b2b3968(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:23:56,734 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 47d6ffc4e6051fed3b2d9d69f1d3f3c0: 2024-11-20T17:23:56,734 INFO [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0., storeName=47d6ffc4e6051fed3b2d9d69f1d3f3c0/C, priority=13, startTime=1732123435898; duration=0sec 2024-11-20T17:23:56,734 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:23:56,734 DEBUG [RS:0;d514dc944523:40121-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 47d6ffc4e6051fed3b2d9d69f1d3f3c0:C 2024-11-20T17:23:56,813 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=274, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/18aa50a345094707805367b4babf6487 2024-11-20T17:23:56,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/0f3e155f2ae64e84adfcdc213532361a is 50, key is test_row_0/B:col10/1732123434260/Put/seqid=0 2024-11-20T17:23:56,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742405_1581 (size=12301) 2024-11-20T17:23:57,221 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/0f3e155f2ae64e84adfcdc213532361a 2024-11-20T17:23:57,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/ef6e98f807ee4cc9aa92822f6e2c3dce is 50, key is test_row_0/C:col10/1732123434260/Put/seqid=0 2024-11-20T17:23:57,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742406_1582 (size=12301) 2024-11-20T17:23:57,630 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/ef6e98f807ee4cc9aa92822f6e2c3dce 2024-11-20T17:23:57,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/18aa50a345094707805367b4babf6487 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/18aa50a345094707805367b4babf6487 2024-11-20T17:23:57,636 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/18aa50a345094707805367b4babf6487, entries=150, sequenceid=274, filesize=30.5 K 2024-11-20T17:23:57,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/0f3e155f2ae64e84adfcdc213532361a as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/0f3e155f2ae64e84adfcdc213532361a 2024-11-20T17:23:57,639 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/0f3e155f2ae64e84adfcdc213532361a, entries=150, sequenceid=274, filesize=12.0 K 2024-11-20T17:23:57,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/ef6e98f807ee4cc9aa92822f6e2c3dce as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/ef6e98f807ee4cc9aa92822f6e2c3dce 2024-11-20T17:23:57,642 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/ef6e98f807ee4cc9aa92822f6e2c3dce, entries=150, sequenceid=274, filesize=12.0 K 2024-11-20T17:23:57,643 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=26.84 KB/27480 for 47d6ffc4e6051fed3b2d9d69f1d3f3c0 in 1647ms, sequenceid=274, compaction requested=false 2024-11-20T17:23:57,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2538): Flush status journal for 47d6ffc4e6051fed3b2d9d69f1d3f3c0: 2024-11-20T17:23:57,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 
2024-11-20T17:23:57,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=168 2024-11-20T17:23:57,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster(4106): Remote procedure done, pid=168 2024-11-20T17:23:57,645 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-11-20T17:23:57,645 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.3260 sec 2024-11-20T17:23:57,646 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees in 3.3290 sec 2024-11-20T17:23:58,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T17:23:58,422 INFO [Thread-2250 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 167 completed 2024-11-20T17:24:00,491 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T17:24:03,960 DEBUG [Thread-2248 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5b914bf4 to 127.0.0.1:55266 2024-11-20T17:24:03,960 DEBUG [Thread-2248 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:24:03,960 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-20T17:24:03,960 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 53 2024-11-20T17:24:03,960 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 58 2024-11-20T17:24:03,960 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 38 2024-11-20T17:24:03,960 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 62 2024-11-20T17:24:03,960 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 7 2024-11-20T17:24:03,960 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T17:24:03,960 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7374 2024-11-20T17:24:03,960 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7212 2024-11-20T17:24:03,960 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7158 2024-11-20T17:24:03,960 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7377 2024-11-20T17:24:03,960 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7226 2024-11-20T17:24:03,960 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T17:24:03,961 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T17:24:03,961 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6d9954b7 to 127.0.0.1:55266 2024-11-20T17:24:03,961 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:24:03,961 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-20T17:24:03,961 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T17:24:03,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=169, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T17:24:03,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T17:24:03,963 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123443963"}]},"ts":"1732123443963"} 2024-11-20T17:24:03,964 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T17:24:03,966 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T17:24:03,966 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T17:24:03,967 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=171, ppid=170, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=47d6ffc4e6051fed3b2d9d69f1d3f3c0, UNASSIGN}] 2024-11-20T17:24:03,968 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=171, ppid=170, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=47d6ffc4e6051fed3b2d9d69f1d3f3c0, UNASSIGN 2024-11-20T17:24:03,968 INFO [PEWorker-2 {}] 
assignment.RegionStateStore(202): pid=171 updating hbase:meta row=47d6ffc4e6051fed3b2d9d69f1d3f3c0, regionState=CLOSING, regionLocation=d514dc944523,40121,1732123262111 2024-11-20T17:24:03,969 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T17:24:03,969 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE; CloseRegionProcedure 47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111}] 2024-11-20T17:24:04,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T17:24:04,120 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,40121,1732123262111 2024-11-20T17:24:04,120 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(124): Close 47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:24:04,120 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T17:24:04,121 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1681): Closing 47d6ffc4e6051fed3b2d9d69f1d3f3c0, disabling compactions & flushes 2024-11-20T17:24:04,121 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:24:04,121 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 2024-11-20T17:24:04,121 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. after waiting 0 ms 2024-11-20T17:24:04,121 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 
2024-11-20T17:24:04,121 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(2837): Flushing 47d6ffc4e6051fed3b2d9d69f1d3f3c0 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-20T17:24:04,121 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=A 2024-11-20T17:24:04,121 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:04,121 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=B 2024-11-20T17:24:04,121 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:04,121 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 47d6ffc4e6051fed3b2d9d69f1d3f3c0, store=C 2024-11-20T17:24:04,121 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:04,126 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205b36c048fe8b4919b8d79a41bff6f786_47d6ffc4e6051fed3b2d9d69f1d3f3c0 is 50, key is test_row_0/A:col10/1732123443959/Put/seqid=0 2024-11-20T17:24:04,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742407_1583 (size=12454) 2024-11-20T17:24:04,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T17:24:04,529 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:24:04,533 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205b36c048fe8b4919b8d79a41bff6f786_47d6ffc4e6051fed3b2d9d69f1d3f3c0 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205b36c048fe8b4919b8d79a41bff6f786_47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:24:04,533 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/be045c3a6c4c401c9afffcc730198ce1, store: [table=TestAcidGuarantees family=A region=47d6ffc4e6051fed3b2d9d69f1d3f3c0] 2024-11-20T17:24:04,534 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/be045c3a6c4c401c9afffcc730198ce1 is 175, key is test_row_0/A:col10/1732123443959/Put/seqid=0 2024-11-20T17:24:04,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742408_1584 (size=31255) 2024-11-20T17:24:04,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T17:24:04,937 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=285, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/be045c3a6c4c401c9afffcc730198ce1 2024-11-20T17:24:04,942 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/2431bb4a875a472ba2a5c66fa685c90a is 50, key is test_row_0/B:col10/1732123443959/Put/seqid=0 2024-11-20T17:24:04,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742409_1585 (size=12301) 2024-11-20T17:24:05,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T17:24:05,346 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/2431bb4a875a472ba2a5c66fa685c90a 2024-11-20T17:24:05,351 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/b27ef51f5dbd4f64963908e9806ed157 is 50, key is test_row_0/C:col10/1732123443959/Put/seqid=0 2024-11-20T17:24:05,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742410_1586 (size=12301) 2024-11-20T17:24:05,755 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=285 (bloomFilter=true), 
to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/b27ef51f5dbd4f64963908e9806ed157 2024-11-20T17:24:05,758 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/A/be045c3a6c4c401c9afffcc730198ce1 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/be045c3a6c4c401c9afffcc730198ce1 2024-11-20T17:24:05,761 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/be045c3a6c4c401c9afffcc730198ce1, entries=150, sequenceid=285, filesize=30.5 K 2024-11-20T17:24:05,761 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/B/2431bb4a875a472ba2a5c66fa685c90a as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/2431bb4a875a472ba2a5c66fa685c90a 2024-11-20T17:24:05,764 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/2431bb4a875a472ba2a5c66fa685c90a, entries=150, sequenceid=285, filesize=12.0 K 2024-11-20T17:24:05,764 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/.tmp/C/b27ef51f5dbd4f64963908e9806ed157 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/b27ef51f5dbd4f64963908e9806ed157 2024-11-20T17:24:05,766 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/b27ef51f5dbd4f64963908e9806ed157, entries=150, sequenceid=285, filesize=12.0 K 2024-11-20T17:24:05,767 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 47d6ffc4e6051fed3b2d9d69f1d3f3c0 in 1646ms, sequenceid=285, compaction requested=true 2024-11-20T17:24:05,767 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/6df64848a39149febbfae78ade10dafc, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/5a1e5c5217434ac69eed00f40a7f08d3, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/75a185d365164dedab1b052850e72848, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/52e442ae5bab4106a71ce083e36c1cac, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/95a4d80ae0c24fdba33cd701e0464c9a, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/b974ab530aee4ba5bbdbf7fde73c49d1, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/2e0383138d194a3891a8a3d26ea30702, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/274753a6d45b4fd785d8ccd0a3bf04af, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/007d212f64fd4f1c885ad6d28f41c480, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/95758fe370e246ceabed231faeda0590, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/32a01d99a8904bc7aaf1e64b22e562ca, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/1aa1823fa5c0444fa1b0b79aa6270687, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/23d64e2c895e4d698e52f6395b790473, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/cece1fd6239e483eab29e63412f1e76b, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/706e347b50b74e529e7bdd73098e2811, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/e6aae4a34dce47d1a66ed14b569071e3, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/67bbc5b398cc4d68b1f7fa4522345e8f, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/63e47fe0466e4908986367ead7e5bae1] to archive 2024-11-20T17:24:05,768 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T17:24:05,769 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/6df64848a39149febbfae78ade10dafc to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/6df64848a39149febbfae78ade10dafc 2024-11-20T17:24:05,770 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/5a1e5c5217434ac69eed00f40a7f08d3 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/5a1e5c5217434ac69eed00f40a7f08d3 2024-11-20T17:24:05,771 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/75a185d365164dedab1b052850e72848 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/75a185d365164dedab1b052850e72848 2024-11-20T17:24:05,772 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/52e442ae5bab4106a71ce083e36c1cac to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/52e442ae5bab4106a71ce083e36c1cac 2024-11-20T17:24:05,773 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/95a4d80ae0c24fdba33cd701e0464c9a to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/95a4d80ae0c24fdba33cd701e0464c9a 2024-11-20T17:24:05,773 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/b974ab530aee4ba5bbdbf7fde73c49d1 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/b974ab530aee4ba5bbdbf7fde73c49d1 2024-11-20T17:24:05,774 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/2e0383138d194a3891a8a3d26ea30702 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/2e0383138d194a3891a8a3d26ea30702 2024-11-20T17:24:05,775 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/274753a6d45b4fd785d8ccd0a3bf04af to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/274753a6d45b4fd785d8ccd0a3bf04af 2024-11-20T17:24:05,775 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/007d212f64fd4f1c885ad6d28f41c480 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/007d212f64fd4f1c885ad6d28f41c480 2024-11-20T17:24:05,776 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/95758fe370e246ceabed231faeda0590 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/95758fe370e246ceabed231faeda0590 2024-11-20T17:24:05,777 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/32a01d99a8904bc7aaf1e64b22e562ca to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/32a01d99a8904bc7aaf1e64b22e562ca 2024-11-20T17:24:05,778 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/1aa1823fa5c0444fa1b0b79aa6270687 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/1aa1823fa5c0444fa1b0b79aa6270687 2024-11-20T17:24:05,778 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/23d64e2c895e4d698e52f6395b790473 to 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/23d64e2c895e4d698e52f6395b790473 2024-11-20T17:24:05,779 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/cece1fd6239e483eab29e63412f1e76b to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/cece1fd6239e483eab29e63412f1e76b 2024-11-20T17:24:05,780 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/706e347b50b74e529e7bdd73098e2811 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/706e347b50b74e529e7bdd73098e2811 2024-11-20T17:24:05,780 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/e6aae4a34dce47d1a66ed14b569071e3 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/e6aae4a34dce47d1a66ed14b569071e3 2024-11-20T17:24:05,781 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/67bbc5b398cc4d68b1f7fa4522345e8f to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/67bbc5b398cc4d68b1f7fa4522345e8f 2024-11-20T17:24:05,782 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/63e47fe0466e4908986367ead7e5bae1 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/63e47fe0466e4908986367ead7e5bae1 2024-11-20T17:24:05,783 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/09ed176a672a44e5b73b615605ae3f55, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/816e9da2e3214e5faf707bf573ff575c, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/ee4a3da720134223b70effea8b33cc83, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/988b64e811694a7b9b349fb0e2ec575b, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/cdf780078b3d4b32bcfb7f6c1faff16a, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/c87d873960004ade917c47ef80997e40, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/c24ed9367d9446c9833f561eec3e3a3f, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/3bfba0ac6f754c3c9153c5260240b226, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/a79832a4bf5a4e91b17b60c5e3804c56, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/6e2991aaf7f9453d97b833bca0baa904, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/01537b4fbbfc4fb0a321dddde9df4beb, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/433305bab0b84178b8873e240554a2ef, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/89abd3bb191c4278aa8dbc34ab942aa2, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/602daededf634220a6fc70774368b91d, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/37c650cdf1ea4dcdacaf1f1bcf7c296d, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/9300b01632cf4244808a1b635b31b340, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/35f1a88303a54c9a8753add0c963f886, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/638396cc632a4100850cb0cc15531217] to archive 2024-11-20T17:24:05,783 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T17:24:05,784 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/09ed176a672a44e5b73b615605ae3f55 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/09ed176a672a44e5b73b615605ae3f55 2024-11-20T17:24:05,785 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/816e9da2e3214e5faf707bf573ff575c to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/816e9da2e3214e5faf707bf573ff575c 2024-11-20T17:24:05,786 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/ee4a3da720134223b70effea8b33cc83 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/ee4a3da720134223b70effea8b33cc83 2024-11-20T17:24:05,786 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/988b64e811694a7b9b349fb0e2ec575b to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/988b64e811694a7b9b349fb0e2ec575b 2024-11-20T17:24:05,787 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/cdf780078b3d4b32bcfb7f6c1faff16a to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/cdf780078b3d4b32bcfb7f6c1faff16a 2024-11-20T17:24:05,788 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/c87d873960004ade917c47ef80997e40 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/c87d873960004ade917c47ef80997e40 2024-11-20T17:24:05,789 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/c24ed9367d9446c9833f561eec3e3a3f to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/c24ed9367d9446c9833f561eec3e3a3f 2024-11-20T17:24:05,789 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/3bfba0ac6f754c3c9153c5260240b226 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/3bfba0ac6f754c3c9153c5260240b226 2024-11-20T17:24:05,790 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/a79832a4bf5a4e91b17b60c5e3804c56 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/a79832a4bf5a4e91b17b60c5e3804c56 2024-11-20T17:24:05,791 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/6e2991aaf7f9453d97b833bca0baa904 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/6e2991aaf7f9453d97b833bca0baa904 2024-11-20T17:24:05,791 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/01537b4fbbfc4fb0a321dddde9df4beb to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/01537b4fbbfc4fb0a321dddde9df4beb 2024-11-20T17:24:05,792 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/433305bab0b84178b8873e240554a2ef to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/433305bab0b84178b8873e240554a2ef 2024-11-20T17:24:05,793 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/89abd3bb191c4278aa8dbc34ab942aa2 to 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/89abd3bb191c4278aa8dbc34ab942aa2 2024-11-20T17:24:05,795 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/602daededf634220a6fc70774368b91d to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/602daededf634220a6fc70774368b91d 2024-11-20T17:24:05,796 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/37c650cdf1ea4dcdacaf1f1bcf7c296d to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/37c650cdf1ea4dcdacaf1f1bcf7c296d 2024-11-20T17:24:05,797 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/9300b01632cf4244808a1b635b31b340 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/9300b01632cf4244808a1b635b31b340 2024-11-20T17:24:05,797 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/35f1a88303a54c9a8753add0c963f886 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/35f1a88303a54c9a8753add0c963f886 2024-11-20T17:24:05,798 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/638396cc632a4100850cb0cc15531217 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/638396cc632a4100850cb0cc15531217 2024-11-20T17:24:05,799 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/681ca112de9445d0adc087a2df6910a1, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/81cbf2fc318847328c8186a68bdb115a, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/9ba5e66b9e8048e7b028bf587659e2d5, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/380ef7e1c29d49e9b5600c6ccd31e5ca, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/72b02bfd8e7b46c595077a218a13eb86, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/4fbf1faa800b4c5b96da6788c0641135, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/edfebd059add4e5db9253011638bb8d0, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/ba6f95a5ef654efe95f885c7e100e2cf, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/22a4d149ca5c41ce990f90987f077677, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/98a1309976f2422bb4350ffca766e005, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/91ee9e14885343d2ae54e3b8241f0142, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/fe7bfeec0bdc46f4913720792ca41649, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/5e85a50a879d47f5963048fc7813f3be, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/dfd19814254a4b6ab7ae7fcf0082a8f5, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/54cbb4370c4140d3b57bdd5219cbc3f6, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/42d0be76c8e847c388cd51ff1dbbab70, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/6cb479a5bea44f8e873d657507e65e0b, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/802f523fbed04ca8bbe7e80e3afd13b5] to archive 2024-11-20T17:24:05,800 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T17:24:05,801 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/681ca112de9445d0adc087a2df6910a1 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/681ca112de9445d0adc087a2df6910a1 2024-11-20T17:24:05,802 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/81cbf2fc318847328c8186a68bdb115a to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/81cbf2fc318847328c8186a68bdb115a 2024-11-20T17:24:05,802 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/9ba5e66b9e8048e7b028bf587659e2d5 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/9ba5e66b9e8048e7b028bf587659e2d5 2024-11-20T17:24:05,803 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/380ef7e1c29d49e9b5600c6ccd31e5ca to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/380ef7e1c29d49e9b5600c6ccd31e5ca 2024-11-20T17:24:05,804 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/72b02bfd8e7b46c595077a218a13eb86 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/72b02bfd8e7b46c595077a218a13eb86 2024-11-20T17:24:05,805 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/4fbf1faa800b4c5b96da6788c0641135 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/4fbf1faa800b4c5b96da6788c0641135 2024-11-20T17:24:05,806 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/edfebd059add4e5db9253011638bb8d0 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/edfebd059add4e5db9253011638bb8d0 2024-11-20T17:24:05,807 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/ba6f95a5ef654efe95f885c7e100e2cf to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/ba6f95a5ef654efe95f885c7e100e2cf 2024-11-20T17:24:05,807 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/22a4d149ca5c41ce990f90987f077677 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/22a4d149ca5c41ce990f90987f077677 2024-11-20T17:24:05,808 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/98a1309976f2422bb4350ffca766e005 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/98a1309976f2422bb4350ffca766e005 2024-11-20T17:24:05,809 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/91ee9e14885343d2ae54e3b8241f0142 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/91ee9e14885343d2ae54e3b8241f0142 2024-11-20T17:24:05,810 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/fe7bfeec0bdc46f4913720792ca41649 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/fe7bfeec0bdc46f4913720792ca41649 2024-11-20T17:24:05,811 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/5e85a50a879d47f5963048fc7813f3be to 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/5e85a50a879d47f5963048fc7813f3be 2024-11-20T17:24:05,811 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/dfd19814254a4b6ab7ae7fcf0082a8f5 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/dfd19814254a4b6ab7ae7fcf0082a8f5 2024-11-20T17:24:05,812 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/54cbb4370c4140d3b57bdd5219cbc3f6 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/54cbb4370c4140d3b57bdd5219cbc3f6 2024-11-20T17:24:05,813 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/42d0be76c8e847c388cd51ff1dbbab70 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/42d0be76c8e847c388cd51ff1dbbab70 2024-11-20T17:24:05,814 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/6cb479a5bea44f8e873d657507e65e0b to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/6cb479a5bea44f8e873d657507e65e0b 2024-11-20T17:24:05,814 DEBUG [StoreCloser-TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/802f523fbed04ca8bbe7e80e3afd13b5 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/802f523fbed04ca8bbe7e80e3afd13b5 2024-11-20T17:24:05,817 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/recovered.edits/288.seqid, newMaxSeqId=288, maxSeqId=4 2024-11-20T17:24:05,818 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0. 
2024-11-20T17:24:05,818 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1635): Region close journal for 47d6ffc4e6051fed3b2d9d69f1d3f3c0: 2024-11-20T17:24:05,819 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(170): Closed 47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:24:05,819 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=171 updating hbase:meta row=47d6ffc4e6051fed3b2d9d69f1d3f3c0, regionState=CLOSED 2024-11-20T17:24:05,821 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=171 2024-11-20T17:24:05,821 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=171, state=SUCCESS; CloseRegionProcedure 47d6ffc4e6051fed3b2d9d69f1d3f3c0, server=d514dc944523,40121,1732123262111 in 1.8510 sec 2024-11-20T17:24:05,822 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=171, resume processing ppid=170 2024-11-20T17:24:05,822 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, ppid=170, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=47d6ffc4e6051fed3b2d9d69f1d3f3c0, UNASSIGN in 1.8540 sec 2024-11-20T17:24:05,823 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=169 2024-11-20T17:24:05,823 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=169, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8560 sec 2024-11-20T17:24:05,824 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123445824"}]},"ts":"1732123445824"} 2024-11-20T17:24:05,825 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T17:24:05,826 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T17:24:05,827 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.8660 sec 2024-11-20T17:24:06,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T17:24:06,066 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 169 completed 2024-11-20T17:24:06,067 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T17:24:06,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:24:06,068 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=173, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:24:06,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-20T17:24:06,069 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=173, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:24:06,070 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:24:06,071 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A, FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B, FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C, FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/recovered.edits] 2024-11-20T17:24:06,073 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/18aa50a345094707805367b4babf6487 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/18aa50a345094707805367b4babf6487 2024-11-20T17:24:06,074 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/be045c3a6c4c401c9afffcc730198ce1 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/be045c3a6c4c401c9afffcc730198ce1 2024-11-20T17:24:06,075 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/fce9647aaba84b5596cca978d70272eb to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/A/fce9647aaba84b5596cca978d70272eb 2024-11-20T17:24:06,077 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/0f3e155f2ae64e84adfcdc213532361a to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/0f3e155f2ae64e84adfcdc213532361a 2024-11-20T17:24:06,078 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/2431bb4a875a472ba2a5c66fa685c90a to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/2431bb4a875a472ba2a5c66fa685c90a 
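Editor's note: the DISABLE that completed just above (procId 169) and the DELETE now being processed (procId 173, archiving the region's A/B/C stores and recovered.edits) are what a client triggers through the HBase Admin API. A minimal sketch of those client-side calls, assuming a standard client Configuration on the classpath:

```java
// Minimal client-side sketch of the DISABLE + DELETE operations logged above.
// Connection/configuration details are assumptions; the table name matches the test table.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml is available
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table); // master runs a DisableTableProcedure (pid=169 above)
      }
      admin.deleteTable(table);    // master runs a DeleteTableProcedure (pid=173 above)
    }
  }
}
```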
2024-11-20T17:24:06,079 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/f132adff399f46929a12d5c55a09724e to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/B/f132adff399f46929a12d5c55a09724e 2024-11-20T17:24:06,080 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/4501f12ba0424b14807e4c5e2b2b3968 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/4501f12ba0424b14807e4c5e2b2b3968 2024-11-20T17:24:06,081 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/b27ef51f5dbd4f64963908e9806ed157 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/b27ef51f5dbd4f64963908e9806ed157 2024-11-20T17:24:06,082 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/ef6e98f807ee4cc9aa92822f6e2c3dce to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/C/ef6e98f807ee4cc9aa92822f6e2c3dce 2024-11-20T17:24:06,084 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/recovered.edits/288.seqid to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0/recovered.edits/288.seqid 2024-11-20T17:24:06,084 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/default/TestAcidGuarantees/47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:24:06,084 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T17:24:06,085 DEBUG [PEWorker-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T17:24:06,085 DEBUG [PEWorker-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-20T17:24:06,087 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200c8f88ff9174446b8bec9c546b579b10_47d6ffc4e6051fed3b2d9d69f1d3f3c0 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200c8f88ff9174446b8bec9c546b579b10_47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:24:06,088 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112013267438c1b64346b6647ef407d569c8_47d6ffc4e6051fed3b2d9d69f1d3f3c0 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112013267438c1b64346b6647ef407d569c8_47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:24:06,089 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201a41803c31fb42e083e1b68b775e51c7_47d6ffc4e6051fed3b2d9d69f1d3f3c0 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201a41803c31fb42e083e1b68b775e51c7_47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:24:06,090 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201da8d512a49645a8856b8d94b64f8e69_47d6ffc4e6051fed3b2d9d69f1d3f3c0 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201da8d512a49645a8856b8d94b64f8e69_47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:24:06,090 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112023491bce2bb241e9bc33898113e03a41_47d6ffc4e6051fed3b2d9d69f1d3f3c0 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112023491bce2bb241e9bc33898113e03a41_47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:24:06,091 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120388997edebb54fd183c4a326a3c12e5e_47d6ffc4e6051fed3b2d9d69f1d3f3c0 to 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120388997edebb54fd183c4a326a3c12e5e_47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:24:06,092 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112049fb0261b05044e6adf46842eda72ecd_47d6ffc4e6051fed3b2d9d69f1d3f3c0 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112049fb0261b05044e6adf46842eda72ecd_47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:24:06,093 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205b36c048fe8b4919b8d79a41bff6f786_47d6ffc4e6051fed3b2d9d69f1d3f3c0 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205b36c048fe8b4919b8d79a41bff6f786_47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:24:06,093 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112061aca7abfa5940449940bd154c14681b_47d6ffc4e6051fed3b2d9d69f1d3f3c0 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112061aca7abfa5940449940bd154c14681b_47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:24:06,094 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112067cb4210a3804c0096f9dec52bd5ce0e_47d6ffc4e6051fed3b2d9d69f1d3f3c0 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112067cb4210a3804c0096f9dec52bd5ce0e_47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:24:06,095 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411206ae9738c05de4e03913c2f1c8eef821f_47d6ffc4e6051fed3b2d9d69f1d3f3c0 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411206ae9738c05de4e03913c2f1c8eef821f_47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:24:06,096 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112072663f35cfa949c7a4686c91f2b9de1f_47d6ffc4e6051fed3b2d9d69f1d3f3c0 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112072663f35cfa949c7a4686c91f2b9de1f_47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:24:06,097 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112088c59f300a2f48928cd41ce51f32cbcb_47d6ffc4e6051fed3b2d9d69f1d3f3c0 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112088c59f300a2f48928cd41ce51f32cbcb_47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:24:06,098 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a145bd79a97348f0a15800c6bda6b8b6_47d6ffc4e6051fed3b2d9d69f1d3f3c0 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a145bd79a97348f0a15800c6bda6b8b6_47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:24:06,099 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120aeeb15f3e7844e21862c2b35f39d76ea_47d6ffc4e6051fed3b2d9d69f1d3f3c0 to hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120aeeb15f3e7844e21862c2b35f39d76ea_47d6ffc4e6051fed3b2d9d69f1d3f3c0 2024-11-20T17:24:06,099 DEBUG [PEWorker-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T17:24:06,101 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=173, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:24:06,103 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T17:24:06,104 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T17:24:06,105 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=173, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:24:06,105 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
2024-11-20T17:24:06,105 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732123446105"}]},"ts":"9223372036854775807"} 2024-11-20T17:24:06,107 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T17:24:06,107 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 47d6ffc4e6051fed3b2d9d69f1d3f3c0, NAME => 'TestAcidGuarantees,,1732123412607.47d6ffc4e6051fed3b2d9d69f1d3f3c0.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T17:24:06,107 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-20T17:24:06,107 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732123446107"}]},"ts":"9223372036854775807"} 2024-11-20T17:24:06,108 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T17:24:06,110 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=173, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:24:06,111 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 43 msec 2024-11-20T17:24:06,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38505 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-20T17:24:06,169 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 173 completed 2024-11-20T17:24:06,178 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobGetAtomicity Thread=237 (was 238), OpenFileDescriptor=447 (was 443) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=264 (was 267), ProcessCount=11 (was 11), AvailableMemoryMB=6126 (was 6141) 2024-11-20T17:24:06,178 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-11-20T17:24:06,178 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T17:24:06,179 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5e83c466 to 127.0.0.1:55266 2024-11-20T17:24:06,179 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:24:06,179 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-20T17:24:06,179 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1173057206, stopped=false 2024-11-20T17:24:06,179 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=d514dc944523,38505,1732123261383 2024-11-20T17:24:06,181 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40121-0x10015f32f730001, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T17:24:06,181 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38505-0x10015f32f730000, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T17:24:06,181 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-11-20T17:24:06,181 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40121-0x10015f32f730001, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T17:24:06,181 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38505-0x10015f32f730000, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T17:24:06,181 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:24:06,181 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'd514dc944523,40121,1732123262111' ***** 2024-11-20T17:24:06,181 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-11-20T17:24:06,181 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40121-0x10015f32f730001, quorum=127.0.0.1:55266, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T17:24:06,182 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38505-0x10015f32f730000, quorum=127.0.0.1:55266, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T17:24:06,182 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-11-20T17:24:06,182 INFO [RS:0;d514dc944523:40121 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-20T17:24:06,182 INFO [RS:0;d514dc944523:40121 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-20T17:24:06,182 INFO [RS:0;d514dc944523:40121 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
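Editor's note: the "Shutting down minicluster" line above is the HBaseTestingUtility teardown at the end of the test run. A simplified lifecycle sketch follows; the class and the elided workload are placeholders, not the actual TestAcidGuaranteesWithBasicPolicy source.

```java
// Simplified lifecycle sketch of the minicluster whose teardown the log records.
import org.apache.hadoop.hbase.HBaseTestingUtility;

public class MiniClusterLifecycleSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster(1);       // one master + one region server, as in this run
    try {
      // ... create TestAcidGuarantees, run the workload, disable/delete the table ...
    } finally {
      util.shutdownMiniCluster();   // produces the STOPPING/STOPPED sequence seen above
    }
  }
}
```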
2024-11-20T17:24:06,183 INFO [RS:0;d514dc944523:40121 {}] regionserver.HRegionServer(3579): Received CLOSE for 5ae1ceb1863550e6bded974b57fd057c 2024-11-20T17:24:06,183 INFO [RS:0;d514dc944523:40121 {}] regionserver.HRegionServer(1224): stopping server d514dc944523,40121,1732123262111 2024-11-20T17:24:06,183 DEBUG [RS:0;d514dc944523:40121 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:24:06,183 INFO [RS:0;d514dc944523:40121 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-20T17:24:06,183 INFO [RS:0;d514dc944523:40121 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-20T17:24:06,183 INFO [RS:0;d514dc944523:40121 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-20T17:24:06,183 INFO [RS:0;d514dc944523:40121 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-11-20T17:24:06,183 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 5ae1ceb1863550e6bded974b57fd057c, disabling compactions & flushes 2024-11-20T17:24:06,183 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732123265279.5ae1ceb1863550e6bded974b57fd057c. 2024-11-20T17:24:06,183 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732123265279.5ae1ceb1863550e6bded974b57fd057c. 2024-11-20T17:24:06,183 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732123265279.5ae1ceb1863550e6bded974b57fd057c. after waiting 0 ms 2024-11-20T17:24:06,183 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732123265279.5ae1ceb1863550e6bded974b57fd057c. 
2024-11-20T17:24:06,183 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 5ae1ceb1863550e6bded974b57fd057c 1/1 column families, dataSize=78 B heapSize=488 B 2024-11-20T17:24:06,183 INFO [RS:0;d514dc944523:40121 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-11-20T17:24:06,183 DEBUG [RS:0;d514dc944523:40121 {}] regionserver.HRegionServer(1603): Online Regions={5ae1ceb1863550e6bded974b57fd057c=hbase:namespace,,1732123265279.5ae1ceb1863550e6bded974b57fd057c., 1588230740=hbase:meta,,1.1588230740} 2024-11-20T17:24:06,184 DEBUG [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-20T17:24:06,184 INFO [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-20T17:24:06,184 DEBUG [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-20T17:24:06,184 DEBUG [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T17:24:06,184 DEBUG [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T17:24:06,184 INFO [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-11-20T17:24:06,187 DEBUG [RS:0;d514dc944523:40121 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 5ae1ceb1863550e6bded974b57fd057c 2024-11-20T17:24:06,189 INFO [regionserver/d514dc944523:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T17:24:06,200 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/hbase/namespace/5ae1ceb1863550e6bded974b57fd057c/.tmp/info/2ffb348430d54a76a3a00546dbb931a1 is 45, key is default/info:d/1732123266633/Put/seqid=0 2024-11-20T17:24:06,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742411_1587 (size=5037) 2024-11-20T17:24:06,205 DEBUG [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/hbase/meta/1588230740/.tmp/info/d24b9d9974654728b4fc5b17be001dd7 is 143, key is hbase:namespace,,1732123265279.5ae1ceb1863550e6bded974b57fd057c./info:regioninfo/1732123266510/Put/seqid=0 2024-11-20T17:24:06,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742412_1588 (size=7725) 2024-11-20T17:24:06,387 DEBUG [RS:0;d514dc944523:40121 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 5ae1ceb1863550e6bded974b57fd057c 2024-11-20T17:24:06,587 DEBUG [RS:0;d514dc944523:40121 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 5ae1ceb1863550e6bded974b57fd057c 2024-11-20T17:24:06,604 INFO 
[RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/hbase/namespace/5ae1ceb1863550e6bded974b57fd057c/.tmp/info/2ffb348430d54a76a3a00546dbb931a1 2024-11-20T17:24:06,607 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/hbase/namespace/5ae1ceb1863550e6bded974b57fd057c/.tmp/info/2ffb348430d54a76a3a00546dbb931a1 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/hbase/namespace/5ae1ceb1863550e6bded974b57fd057c/info/2ffb348430d54a76a3a00546dbb931a1 2024-11-20T17:24:06,609 INFO [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/hbase/meta/1588230740/.tmp/info/d24b9d9974654728b4fc5b17be001dd7 2024-11-20T17:24:06,610 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/hbase/namespace/5ae1ceb1863550e6bded974b57fd057c/info/2ffb348430d54a76a3a00546dbb931a1, entries=2, sequenceid=6, filesize=4.9 K 2024-11-20T17:24:06,610 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 5ae1ceb1863550e6bded974b57fd057c in 427ms, sequenceid=6, compaction requested=false 2024-11-20T17:24:06,614 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/hbase/namespace/5ae1ceb1863550e6bded974b57fd057c/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-20T17:24:06,614 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1732123265279.5ae1ceb1863550e6bded974b57fd057c. 2024-11-20T17:24:06,614 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 5ae1ceb1863550e6bded974b57fd057c: 2024-11-20T17:24:06,614 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1732123265279.5ae1ceb1863550e6bded974b57fd057c. 
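Editor's note: the flush just recorded for the namespace region (memstore written to a .tmp HFile, then committed into the info/ family) happens automatically on region close. The same kind of flush can also be requested explicitly through the Admin API; a hedged sketch, with connection setup assumed:

```java
// Hedged sketch: requesting a table flush via the client Admin API. The close
// path above flushes internally; this is only the client-visible analogue.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Writes the table's memstores out as HFiles (the same .tmp -> commit
      // sequence the DefaultStoreFlusher/HRegionFileSystem lines above show).
      admin.flush(TableName.valueOf("hbase:namespace"));
    }
  }
}
```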
2024-11-20T17:24:06,627 DEBUG [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/hbase/meta/1588230740/.tmp/rep_barrier/7063e87a90f749d492d132cae5cccdbc is 102, key is TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2./rep_barrier:/1732123292313/DeleteFamily/seqid=0 2024-11-20T17:24:06,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742413_1589 (size=6025) 2024-11-20T17:24:06,788 DEBUG [RS:0;d514dc944523:40121 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-20T17:24:06,919 INFO [regionserver/d514dc944523:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-20T17:24:06,919 INFO [regionserver/d514dc944523:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-20T17:24:06,988 DEBUG [RS:0;d514dc944523:40121 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-20T17:24:07,030 INFO [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/hbase/meta/1588230740/.tmp/rep_barrier/7063e87a90f749d492d132cae5cccdbc 2024-11-20T17:24:07,048 DEBUG [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/hbase/meta/1588230740/.tmp/table/737b5a44b04d4f0db8b72241a757a21e is 96, key is TestAcidGuarantees,,1732123266865.d436a1ae301ec26cf78d29bd05a18bd2./table:/1732123292313/DeleteFamily/seqid=0 2024-11-20T17:24:07,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742414_1590 (size=5942) 2024-11-20T17:24:07,188 INFO [RS:0;d514dc944523:40121 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-11-20T17:24:07,188 DEBUG [RS:0;d514dc944523:40121 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-20T17:24:07,188 DEBUG [RS:0;d514dc944523:40121 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-20T17:24:07,388 DEBUG [RS:0;d514dc944523:40121 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-20T17:24:07,452 INFO [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/hbase/meta/1588230740/.tmp/table/737b5a44b04d4f0db8b72241a757a21e 2024-11-20T17:24:07,455 DEBUG [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/hbase/meta/1588230740/.tmp/info/d24b9d9974654728b4fc5b17be001dd7 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/hbase/meta/1588230740/info/d24b9d9974654728b4fc5b17be001dd7 2024-11-20T17:24:07,458 INFO [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/hbase/meta/1588230740/info/d24b9d9974654728b4fc5b17be001dd7, entries=22, sequenceid=93, filesize=7.5 K 2024-11-20T17:24:07,458 DEBUG [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/hbase/meta/1588230740/.tmp/rep_barrier/7063e87a90f749d492d132cae5cccdbc as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/hbase/meta/1588230740/rep_barrier/7063e87a90f749d492d132cae5cccdbc 2024-11-20T17:24:07,461 INFO [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/hbase/meta/1588230740/rep_barrier/7063e87a90f749d492d132cae5cccdbc, entries=6, sequenceid=93, filesize=5.9 K 2024-11-20T17:24:07,462 DEBUG [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/hbase/meta/1588230740/.tmp/table/737b5a44b04d4f0db8b72241a757a21e as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/hbase/meta/1588230740/table/737b5a44b04d4f0db8b72241a757a21e 2024-11-20T17:24:07,464 INFO [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/hbase/meta/1588230740/table/737b5a44b04d4f0db8b72241a757a21e, entries=9, sequenceid=93, filesize=5.8 K 2024-11-20T17:24:07,465 INFO [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 1281ms, sequenceid=93, compaction requested=false 2024-11-20T17:24:07,468 DEBUG [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-11-20T17:24:07,469 DEBUG [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T17:24:07,469 INFO [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-20T17:24:07,469 DEBUG [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-20T17:24:07,469 DEBUG [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-20T17:24:07,589 INFO [RS:0;d514dc944523:40121 {}] regionserver.HRegionServer(1250): stopping server d514dc944523,40121,1732123262111; all regions closed. 
2024-11-20T17:24:07,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741834_1010 (size=26050) 2024-11-20T17:24:07,595 DEBUG [RS:0;d514dc944523:40121 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/oldWALs 2024-11-20T17:24:07,595 INFO [RS:0;d514dc944523:40121 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL d514dc944523%2C40121%2C1732123262111.meta:.meta(num 1732123265033) 2024-11-20T17:24:07,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741832_1008 (size=13491547) 2024-11-20T17:24:07,598 DEBUG [RS:0;d514dc944523:40121 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/oldWALs 2024-11-20T17:24:07,598 INFO [RS:0;d514dc944523:40121 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL d514dc944523%2C40121%2C1732123262111:(num 1732123264084) 2024-11-20T17:24:07,598 DEBUG [RS:0;d514dc944523:40121 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:24:07,598 INFO [RS:0;d514dc944523:40121 {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T17:24:07,599 INFO [RS:0;d514dc944523:40121 {}] hbase.ChoreService(370): Chore service for: regionserver/d514dc944523:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-11-20T17:24:07,599 INFO [regionserver/d514dc944523:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-11-20T17:24:07,599 INFO [RS:0;d514dc944523:40121 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:40121 2024-11-20T17:24:07,604 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40121-0x10015f32f730001, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/d514dc944523,40121,1732123262111 2024-11-20T17:24:07,604 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38505-0x10015f32f730000, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T17:24:07,604 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$359/0x00007fc9448f32b0@1d5941e7 rejected from java.util.concurrent.ThreadPoolExecutor@585bdc28[Shutting down, pool size = 1, active threads = 0, queued tasks = 0, completed tasks = 15] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1360) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] 
at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-11-20T17:24:07,604 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [d514dc944523,40121,1732123262111] 2024-11-20T17:24:07,605 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing d514dc944523,40121,1732123262111; numProcessing=1 2024-11-20T17:24:07,606 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/d514dc944523,40121,1732123262111 already deleted, retry=false 2024-11-20T17:24:07,606 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; d514dc944523,40121,1732123262111 expired; onlineServers=0 2024-11-20T17:24:07,606 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'd514dc944523,38505,1732123261383' ***** 2024-11-20T17:24:07,606 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-20T17:24:07,606 DEBUG [M:0;d514dc944523:38505 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d9287db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d514dc944523/172.17.0.2:0 2024-11-20T17:24:07,606 INFO [M:0;d514dc944523:38505 {}] regionserver.HRegionServer(1224): stopping server d514dc944523,38505,1732123261383 2024-11-20T17:24:07,606 INFO [M:0;d514dc944523:38505 {}] regionserver.HRegionServer(1250): stopping server d514dc944523,38505,1732123261383; all regions closed. 2024-11-20T17:24:07,607 DEBUG [M:0;d514dc944523:38505 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:24:07,607 DEBUG [M:0;d514dc944523:38505 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-20T17:24:07,607 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
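Editor's note: the RejectedExecutionException in the ERROR above is the generic behavior of a Java executor that has already begun shutting down when a late ZooKeeper event arrives. A small self-contained illustration of that mechanism; the names here are generic and this is not HBase's ZKWatcher code.

```java
// Illustration of why a task submitted after shutdown() is rejected, as in the
// "Error while calling watcher." ERROR above. Generic example, not HBase code.
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;

public class RejectedAfterShutdown {
  public static void main(String[] args) {
    ExecutorService pool = Executors.newSingleThreadExecutor();
    pool.execute(() -> System.out.println("event 1 processed"));
    pool.shutdown();                    // teardown has begun; no new tasks accepted
    try {
      pool.execute(() -> System.out.println("late event")); // arrives too late
    } catch (RejectedExecutionException e) {
      // During cluster shutdown HBase logs this as an ERROR, but it is benign here.
      System.out.println("rejected: " + e);
    }
  }
}
```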
2024-11-20T17:24:07,607 DEBUG [M:0;d514dc944523:38505 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-11-20T17:24:07,607 DEBUG [master/d514dc944523:0:becomeActiveMaster-HFileCleaner.small.0-1732123263792 {}] cleaner.HFileCleaner(306): Exit Thread[master/d514dc944523:0:becomeActiveMaster-HFileCleaner.small.0-1732123263792,5,FailOnTimeoutGroup]
2024-11-20T17:24:07,607 DEBUG [master/d514dc944523:0:becomeActiveMaster-HFileCleaner.large.0-1732123263791 {}] cleaner.HFileCleaner(306): Exit Thread[master/d514dc944523:0:becomeActiveMaster-HFileCleaner.large.0-1732123263791,5,FailOnTimeoutGroup]
2024-11-20T17:24:07,607 INFO [M:0;d514dc944523:38505 {}] hbase.ChoreService(370): Chore service for: master/d514dc944523:0 had [] on shutdown
2024-11-20T17:24:07,607 DEBUG [M:0;d514dc944523:38505 {}] master.HMaster(1733): Stopping service threads
2024-11-20T17:24:07,607 INFO [M:0;d514dc944523:38505 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-11-20T17:24:07,607 ERROR [M:0;d514dc944523:38505 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT
java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10]
	Thread[HFileArchiver-6,5,PEWorkerGroup]
2024-11-20T17:24:07,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38505-0x10015f32f730000, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-11-20T17:24:07,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38505-0x10015f32f730000, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T17:24:07,608 INFO [M:0;d514dc944523:38505 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-11-20T17:24:07,608 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-11-20T17:24:07,608 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38505-0x10015f32f730000, quorum=127.0.0.1:55266, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-20T17:24:07,608 DEBUG [M:0;d514dc944523:38505 {}] zookeeper.ZKUtil(347): master:38505-0x10015f32f730000, quorum=127.0.0.1:55266, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-11-20T17:24:07,608 WARN [M:0;d514dc944523:38505 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-11-20T17:24:07,608 INFO [M:0;d514dc944523:38505 {}] assignment.AssignmentManager(391): Stopping assignment manager
2024-11-20T17:24:07,609 INFO [M:0;d514dc944523:38505 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-11-20T17:24:07,609 DEBUG [M:0;d514dc944523:38505 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-20T17:24:07,609 INFO [M:0;d514dc944523:38505 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T17:24:07,609 DEBUG [M:0;d514dc944523:38505 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T17:24:07,609 DEBUG [M:0;d514dc944523:38505 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-20T17:24:07,609 DEBUG [M:0;d514dc944523:38505 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T17:24:07,609 INFO [M:0;d514dc944523:38505 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=746.44 KB heapSize=916.40 KB
2024-11-20T17:24:07,624 DEBUG [M:0;d514dc944523:38505 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4a7b137f58e64d22b9bd4d29af707e6a is 82, key is hbase:meta,,1/info:regioninfo/1732123265169/Put/seqid=0
2024-11-20T17:24:07,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742415_1591 (size=5672)
2024-11-20T17:24:07,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40121-0x10015f32f730001, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-20T17:24:07,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40121-0x10015f32f730001, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-20T17:24:07,706 INFO [RS:0;d514dc944523:40121 {}] regionserver.HRegionServer(1307): Exiting; stopping=d514dc944523,40121,1732123262111; zookeeper connection closed.
2024-11-20T17:24:07,706 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@34b0e862 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@34b0e862
2024-11-20T17:24:07,706 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-11-20T17:24:08,028 INFO [M:0;d514dc944523:38505 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2092 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4a7b137f58e64d22b9bd4d29af707e6a
2024-11-20T17:24:08,056 DEBUG [M:0;d514dc944523:38505 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e8411d5b0d1a44999ee3cc269a770b2e is 2277, key is \x00\x00\x00\x00\x00\x00\x00\x96/proc:d/1732123415624/Put/seqid=0
2024-11-20T17:24:08,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742416_1592 (size=43964)
2024-11-20T17:24:08,460 INFO [M:0;d514dc944523:38505 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=745.88 KB at sequenceid=2092 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e8411d5b0d1a44999ee3cc269a770b2e
2024-11-20T17:24:08,463 INFO [M:0;d514dc944523:38505 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for e8411d5b0d1a44999ee3cc269a770b2e
2024-11-20T17:24:08,478 DEBUG [M:0;d514dc944523:38505 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a668bbcdb45a43988ff940af25ccd126 is 69, key is d514dc944523,40121,1732123262111/rs:state/1732123263848/Put/seqid=0
2024-11-20T17:24:08,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742417_1593 (size=5156)
2024-11-20T17:24:08,882 INFO [M:0;d514dc944523:38505 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2092 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a668bbcdb45a43988ff940af25ccd126
2024-11-20T17:24:08,886 DEBUG [M:0;d514dc944523:38505 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4a7b137f58e64d22b9bd4d29af707e6a as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4a7b137f58e64d22b9bd4d29af707e6a
2024-11-20T17:24:08,888 INFO [M:0;d514dc944523:38505 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4a7b137f58e64d22b9bd4d29af707e6a, entries=8, sequenceid=2092, filesize=5.5 K
2024-11-20T17:24:08,889 DEBUG [M:0;d514dc944523:38505 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e8411d5b0d1a44999ee3cc269a770b2e as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e8411d5b0d1a44999ee3cc269a770b2e
2024-11-20T17:24:08,891 INFO [M:0;d514dc944523:38505 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for e8411d5b0d1a44999ee3cc269a770b2e
2024-11-20T17:24:08,891 INFO [M:0;d514dc944523:38505 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e8411d5b0d1a44999ee3cc269a770b2e, entries=173, sequenceid=2092, filesize=42.9 K
2024-11-20T17:24:08,891 DEBUG [M:0;d514dc944523:38505 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a668bbcdb45a43988ff940af25ccd126 as hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a668bbcdb45a43988ff940af25ccd126
2024-11-20T17:24:08,894 INFO [M:0;d514dc944523:38505 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41637/user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a668bbcdb45a43988ff940af25ccd126, entries=1, sequenceid=2092, filesize=5.0 K
2024-11-20T17:24:08,894 INFO [M:0;d514dc944523:38505 {}] regionserver.HRegion(3040): Finished flush of dataSize ~746.44 KB/764354, heapSize ~916.10 KB/938088, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1285ms, sequenceid=2092, compaction requested=false
2024-11-20T17:24:08,895 INFO [M:0;d514dc944523:38505 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T17:24:08,896 DEBUG [M:0;d514dc944523:38505 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-11-20T17:24:08,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741830_1006 (size=901549)
2024-11-20T17:24:08,897 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(743): complete file /user/jenkins/test-data/6706ebd9-887a-de0b-c87b-3b660f28f8c0/MasterData/WALs/d514dc944523,38505,1732123261383/d514dc944523%2C38505%2C1732123261383.1732123263284 not finished, retry = 0
2024-11-20T17:24:08,998 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-11-20T17:24:08,998 INFO [M:0;d514dc944523:38505 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
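The flush above follows a write-to-temp-then-commit pattern: each column family is flushed to a file under the region's .tmp directory and then moved into its family directory (info/, proc/, rs/). A minimal sketch of that pattern against a Hadoop FileSystem, with hypothetical demo paths; this is not the HRegionFileSystem implementation, only an illustration of the same idea:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Minimal sketch: write the new file under .tmp, then rename it into the
// final directory so readers only ever see complete files.
public class TmpThenCommit {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    Path tmp = new Path("/demo/store/.tmp/flush-0001");   // hypothetical path
    Path dst = new Path("/demo/store/info/flush-0001");   // hypothetical path

    try (FSDataOutputStream out = fs.create(tmp, true)) { // write the flushed data
      out.writeUTF("flushed cells would go here");
    }
    fs.mkdirs(dst.getParent());
    if (!fs.rename(tmp, dst)) {                           // the "Committing ... as ..." step
      throw new java.io.IOException("commit failed for " + dst);
    }
  }
}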
2024-11-20T17:24:08,999 INFO [M:0;d514dc944523:38505 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:38505
2024-11-20T17:24:09,000 DEBUG [M:0;d514dc944523:38505 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/d514dc944523,38505,1732123261383 already deleted, retry=false
2024-11-20T17:24:09,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38505-0x10015f32f730000, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-20T17:24:09,102 INFO [M:0;d514dc944523:38505 {}] regionserver.HRegionServer(1307): Exiting; stopping=d514dc944523,38505,1732123261383; zookeeper connection closed.
2024-11-20T17:24:09,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38505-0x10015f32f730000, quorum=127.0.0.1:55266, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-20T17:24:09,107 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1f79ec76{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-20T17:24:09,109 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-20T17:24:09,109 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-20T17:24:09,110 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-20T17:24:09,110 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0b2b9d67-2b66-a3f2-eea9-49157503e4e9/hadoop.log.dir/,STOPPED}
2024-11-20T17:24:09,113 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-20T17:24:09,113 WARN [BP-301989904-172.17.0.2-1732123258485 heartbeating to localhost/127.0.0.1:41637 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-20T17:24:09,113 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-20T17:24:09,113 WARN [BP-301989904-172.17.0.2-1732123258485 heartbeating to localhost/127.0.0.1:41637 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-301989904-172.17.0.2-1732123258485 (Datanode Uuid 6fd58b4a-7db6-42c8-a040-22f97fcd7c4e) service to localhost/127.0.0.1:41637
2024-11-20T17:24:09,115 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0b2b9d67-2b66-a3f2-eea9-49157503e4e9/cluster_0914e0c7-bed1-0a2a-84e6-4611fbce9121/dfs/data/data1/current/BP-301989904-172.17.0.2-1732123258485 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T17:24:09,115 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0b2b9d67-2b66-a3f2-eea9-49157503e4e9/cluster_0914e0c7-bed1-0a2a-84e6-4611fbce9121/dfs/data/data2/current/BP-301989904-172.17.0.2-1732123258485 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T17:24:09,116 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-20T17:24:09,123 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b03fcff{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-20T17:24:09,123 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-20T17:24:09,124 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-20T17:24:09,124 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-20T17:24:09,124 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0b2b9d67-2b66-a3f2-eea9-49157503e4e9/hadoop.log.dir/,STOPPED}
2024-11-20T17:24:09,139 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-11-20T17:24:09,263 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down
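The whole sequence above, from region-server stop through "Minicluster is down", is the teardown of the test minicluster. A minimal sketch of the lifecycle that produces logs like these, assuming the HBaseTestingUtility helper from the hbase-server test artifacts is on the classpath:

import org.apache.hadoop.hbase.HBaseTestingUtility;

// Minimal sketch: start the single-node minicluster (ZooKeeper, HDFS, one
// master and one region server), run test logic, then tear everything down.
public class MiniClusterLifecycle {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster();
    try {
      // ... test logic against util.getConnection() would go here ...
    } finally {
      util.shutdownMiniCluster(); // drives the shutdown sequence logged above
    }
  }
}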